From be4135a64eb95b998204ee3db814eb78a9240944 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 19 May 2025 18:16:58 +0200 Subject: [PATCH 01/24] remove private --- evap/evaluation/models.py | 9 --------- evap/evaluation/tests/test_models.py | 1 - evap/results/tools.py | 4 ++-- .../templates/staff_evaluation_textanswers_section.html | 3 --- evap/staff/views.py | 1 - 5 files changed, 2 insertions(+), 16 deletions(-) diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index 8e95bf5881..e9b3e8aaee 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1528,7 +1528,6 @@ class ReviewDecision(models.TextChoices): """ PUBLIC = "PU", _("public") - PRIVATE = "PR", _("private") # This answer should only be displayed to the contributor the question was about DELETED = "DE", _("deleted") UNDECIDED = "UN", _("undecided") @@ -1557,10 +1556,6 @@ class Meta: def will_be_deleted(self): return self.review_decision == self.ReviewDecision.DELETED - @property - def will_be_private(self): - return self.review_decision == self.ReviewDecision.PRIVATE - @property def will_be_public(self): return self.review_decision == self.ReviewDecision.PUBLIC @@ -1571,10 +1566,6 @@ def will_be_public(self): def is_public(self): return self.will_be_public - @property - def is_private(self): - return self.will_be_private - @property def is_reviewed(self): return self.review_decision != self.ReviewDecision.UNDECIDED diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py index e74f873c6e..70442cf870 100644 --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -366,7 +366,6 @@ def test_textanswers_to_delete_get_deleted_on_publish(self): [ TextAnswer.ReviewDecision.DELETED, TextAnswer.ReviewDecision.PUBLIC, - TextAnswer.ReviewDecision.PRIVATE, ] ), _quantity=3, diff --git a/evap/results/tools.py b/evap/results/tools.py index e4f7abf43b..9b2e02b6d4 100644 --- a/evap/results/tools.py +++ b/evap/results/tools.py @@ -222,7 +222,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = ((textanswer.contribution_id, textanswer.question_id), textanswer) for contribution in evaluation.contributions.all() for textanswer in contribution.textanswer_set.all() - if textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC] + if textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC ) racs_per_contribution_question: dict[tuple[int, int], list[RatingAnswerCounter]] = unordered_groupby( @@ -479,7 +479,7 @@ def can_textanswer_be_seen_by( # noqa: PLR0911 textanswer: TextAnswer, view: str, ) -> bool: - assert textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC] + assert textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC contributor = textanswer.contribution.contributor if view == "public": diff --git a/evap/staff/templates/staff_evaluation_textanswers_section.html b/evap/staff/templates/staff_evaluation_textanswers_section.html index f171cc0041..1dba4c445a 100644 --- a/evap/staff/templates/staff_evaluation_textanswers_section.html +++ b/evap/staff/templates/staff_evaluation_textanswers_section.html @@ -39,9 +39,6 @@ - - - diff --git a/evap/staff/views.py b/evap/staff/views.py index 16e58022c7..ca74d87139 100644 --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -1753,7 +1753,6 @@ def evaluation_textanswers_update_publish(request): review_decision_for_action = { 
"publish": TextAnswer.ReviewDecision.PUBLIC, - "make_private": TextAnswer.ReviewDecision.PRIVATE, "delete": TextAnswer.ReviewDecision.DELETED, "unreview": TextAnswer.ReviewDecision.UNDECIDED, } From 3bed114c88cf078410068c1126716116145e5dfb Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:53:07 +0100 Subject: [PATCH 02/24] Add average results to exporter --- evap/results/exporters.py | 148 ++++++++++++++++++++------- evap/results/tests/test_exporters.py | 64 ++++++------ evap/staff/views.py | 6 +- 3 files changed, 149 insertions(+), 69 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 75f04a3188..535081ecf6 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -112,24 +112,29 @@ def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Que return filtered_questions @staticmethod - def filter_evaluations( - semesters: Iterable[Semester], - evaluation_states: Iterable[Evaluation.State], - program_ids: Iterable[int], - course_type_ids: Iterable[int], - contributor: UserProfile | None, - include_not_enough_voters: bool, + def filter_evaluations( # noqa: PLR0912 + semesters: Iterable[Semester] | None = None, + evaluation_states: Iterable[Evaluation.State] | None = None, + program_ids: Iterable[int] | None = None, + course_type_ids: Iterable[int] | None = None, + contributor: UserProfile | None = None, + include_not_enough_voters: bool = False, ) -> tuple[list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], list[Questionnaire], bool]: # pylint: disable=too-many-locals course_results_exist = False evaluations_with_results = [] used_questionnaires: set[Questionnaire] = set() - evaluations_filter = Q( - course__semester__in=semesters, - state__in=evaluation_states, - course__programs__in=program_ids, - course__type__in=course_type_ids, - ) + + evaluations_filter = Q() + if semesters: + evaluations_filter &= Q(course__semester__in=semesters) + if evaluation_states: + evaluations_filter &= Q(state__in=evaluation_states) + if program_ids: + evaluations_filter &= Q(course__programs__in=program_ids) + if course_type_ids: + evaluations_filter &= Q(course__type__in=course_type_ids) + if contributor: evaluations_filter = evaluations_filter & ( Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor]) @@ -198,6 +203,8 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") + self.write_cell("Average for this Question", "evaluation") + for evaluation, __ in evaluations_with_results: title = evaluation.full_name if len(semesters) > 1: @@ -208,17 +215,19 @@ def write_headings_and_evaluation_info( self.next_row() self.write_cell(_("Programs"), "bold") + self.write_cell("", "program") for evaluation, __ in evaluations_with_results: self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program") self.next_row() self.write_cell(_("Course Type"), "bold") + self.write_cell("", "program") for evaluation, __ in evaluations_with_results: self.write_cell(evaluation.course.type.name, "border_left_right") self.next_row() # One more cell is needed for the question column - self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results)) + self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) def write_overall_results( self, @@ -228,14 +237,17 @@ def write_overall_results( annotated_evaluations = 
[e for e, __ in evaluations_with_results] self.write_cell(_("Overall Average Grade"), "bold") + self.write_cell("", "border_left_right") averages = (distribution_to_grade(calculate_average_distribution(e)) for e in annotated_evaluations) self.write_row(averages, lambda avg: self.grade_to_style(avg) if avg else "border_left_right") self.write_cell(_("Total voters/Total participants"), "bold") + self.write_cell("", "total_voters") voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in annotated_evaluations) self.write_row(voter_ratios, style="total_voters") self.write_cell(_("Evaluation rate"), "bold") + self.write_cell("", "evaluation_rate") # round down like in progress bar participant_percentages = ( f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%" @@ -249,17 +261,19 @@ def write_overall_results( # Borders only if there is a course grade below. Offset by one column self.write_empty_row_with_styles( - ["default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1] + ["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1] ) self.write_cell(_("Evaluation weight"), "bold") - weight_percentages = ( + self.write_cell("") + weight_percentages = tuple( f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True) ) self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default") self.write_cell(_("Course Grade"), "bold") + self.write_cell("") for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True): if not gt1: self.write_cell() @@ -271,58 +285,118 @@ def write_overall_results( self.next_row() # Same reasoning as above. - self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1]) + self.write_empty_row_with_styles( + ["default", "default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1] + ) + + @classmethod + def _calculate_display_result( + cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]] + ) -> tuple[float | None, float | None]: + values = [] + count_sum = 0 + approval_count = 0 + + for grade_result in results[questionnaire_id]: + if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): + continue + + values.append(grade_result.average * grade_result.count_sum) + count_sum += grade_result.count_sum + if grade_result.question.is_yes_no_question: + approval_count += grade_result.approval_count + + if not values: + return None, None + + avg = sum(values) / count_sum + if question.is_yes_no_question: + percent_approval = approval_count / count_sum if count_sum > 0 else 0 + return avg, percent_approval + return avg, None + + @classmethod + def _calculate_display_result_average( + cls, + evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + questionnaire_id: int, + question: Question, + ) -> tuple[float | None, float | None]: + avg_values = [] + count_avg = 0 + avg_approval = [] + count_approval = 0 + + for __, results in evaluations_with_results: + if ( + results.get(questionnaire_id) is None + ): # ignore all results without the questionaire for average calculation + continue + avg, percent_approval = cls._calculate_display_result(questionnaire_id, question, results) + if avg is not None: + avg_values.append(avg) + count_avg += 1 + if percent_approval is not None: + avg_approval.append(percent_approval) + count_approval += 1 + + 
return sum(avg_values) / count_avg if count_avg else None, ( + sum(avg_approval) / count_approval if count_approval else None + ) def write_questionnaire( self, questionnaire: Questionnaire, evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], contributor: UserProfile | None, + all_evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], ) -> None: if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR: self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold") else: self.write_cell(questionnaire.public_name, "bold") + self.write_cell("", "border_left_right") + # first cell of row is printed above self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results)) for question in self.filter_text_and_heading_questions(questionnaire.questions.all()): self.write_cell(question.text, "italic" if question.is_heading_question else "default") + question_average, question_approval_count = self._calculate_display_result_average( + all_evaluations_with_results, questionnaire.id, question + ) + + if question_average is not None: + if question.is_yes_no_question: + self.write_cell(f"{question_approval_count:.0%}", self.grade_to_style(question_average)) + else: + self.write_cell(question_average, self.grade_to_style(question_average)) + else: + self.write_cell("", "border_left_right") + + # evaluations for __, results in evaluations_with_results: if questionnaire.id not in results or question.is_heading_question: self.write_cell(style="border_left_right") continue - values = [] - count_sum = 0 - approval_count = 0 - - for grade_result in results[questionnaire.id]: - if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): - continue + avg, percent_approval = self._calculate_display_result(questionnaire.id, question, results) - values.append(grade_result.average * grade_result.count_sum) - count_sum += grade_result.count_sum - if grade_result.question.is_yes_no_question: - approval_count += grade_result.approval_count - - if not values: + if avg is None: self.write_cell(style="border_left_right") continue - avg = sum(values) / count_sum if question.is_yes_no_question: - percent_approval = approval_count / count_sum if count_sum > 0 else 0 self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg)) else: self.write_cell(avg, self.grade_to_style(avg)) self.next_row() - self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results)) + self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) - # pylint: disable=arguments-differ + # pylint: disable=arguments-differ,too-many-locals def export_impl( self, semesters: QuerySetOrSequence[Semester], @@ -335,6 +409,8 @@ def export_impl( # We want to throw early here, since workbook.save() will throw an IndexError otherwise. 
assert len(selection_list) > 0 + all_evaluations_with_results, _, _ = self.filter_evaluations(evaluation_states=[Evaluation.State.PUBLISHED]) + for sheet_counter, (program_ids, course_type_ids) in enumerate(selection_list, 1): self.cur_sheet = self.workbook.add_sheet("Sheet " + str(sheet_counter)) self.cur_row = 0 @@ -358,7 +434,9 @@ def export_impl( ) for questionnaire in used_questionnaires: - self.write_questionnaire(questionnaire, evaluations_with_results, contributor) + self.write_questionnaire( + questionnaire, evaluations_with_results, contributor, all_evaluations_with_results + ) self.write_overall_results(evaluations_with_results, course_results_exist) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 99bccb477b..87234be0fa 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -174,12 +174,12 @@ def test_view_excel_file_sorted(self): # Load responses as Excel files and check for correct sorting workbook = xlrd.open_workbook(file_contents=content_de.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation1\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation2\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation1\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation2\n") workbook = xlrd.open_workbook(file_contents=content_en.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation2\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation2\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation1\n") def test_course_type_ordering(self): program = baker.make(Program) @@ -220,8 +220,8 @@ def test_course_type_ordering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name + "\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_2.full_name + "\n") course_type_2.order = 0 course_type_2.save() @@ -233,8 +233,8 @@ def test_course_type_ordering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_1.full_name + "\n") def test_multiple_sheets(self): binary_content = BytesIO() @@ -286,17 +286,17 @@ def test_include_unpublished(self): sheet = self.get_export_sheet( include_unpublished=False, semester=semester, program=program, course_types=course_types ) - self.assertEqual(len(sheet.row_values(0)), 2) - self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name) + self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name) # Now, make sure that it appears when wanted sheet = self.get_export_sheet( include_unpublished=True, semester=semester, program=program, course_types=course_types ) - 
self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(len(sheet.row_values(0)), 4) # These two should be ordered according to evaluation.course.type.order - self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name) - self.assertEqual(sheet.row_values(0)[2][:-1], unpublished_evaluation.full_name) + self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name) + self.assertEqual(sheet.row_values(0)[3][:-1], unpublished_evaluation.full_name) def test_include_not_enough_voters(self): semester = baker.make(Semester) @@ -325,15 +325,15 @@ def test_include_not_enough_voters(self): # First, make sure that the one with only a single voter does not appear sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=False) - self.assertEqual(len(sheet.row_values(0)), 2) - self.assertEqual(sheet.row_values(0)[1][:-1], enough_voters_evaluation.full_name) + self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(sheet.row_values(0)[2][:-1], enough_voters_evaluation.full_name) # Now, check with the option enabled sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=True) - self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(len(sheet.row_values(0)), 4) self.assertEqual( {enough_voters_evaluation.full_name, not_enough_voters_evaluation.full_name}, - {sheet.row_values(0)[1][:-1], sheet.row_values(0)[2][:-1]}, + {sheet.row_values(0)[2][:-1], sheet.row_values(0)[3][:-1]}, ) def test_no_program_or_course_type(self): @@ -349,7 +349,7 @@ def test_exclude_single_result(self): cache_results(evaluation) sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) self.assertEqual( - len(sheet.row_values(0)), 1, "There should be no column for the evaluation, only the row description" + len(sheet.row_values(0)), 2, "There should be no column for the evaluation, only the row description" ) def test_exclude_used_but_unanswered_questionnaires(self): @@ -385,7 +385,7 @@ def test_program_course_type_name(self): cache_results(evaluation) sheet = self.get_export_sheet(evaluation.course.semester, program, [course_type.id]) - self.assertEqual(sheet.col_values(1)[1:3], [program.name, course_type.name]) + self.assertEqual(sheet.col_values(2)[1:3], [program.name, course_type.name]) def test_multiple_evaluations(self): semester = baker.make(Semester) @@ -401,7 +401,7 @@ def test_multiple_evaluations(self): sheet = self.get_export_sheet(semester, program, [evaluation1.course.type.id, evaluation2.course.type.id]) - self.assertEqual(set(sheet.row_values(0)[1:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"}) + self.assertEqual(set(sheet.row_values(0)[2:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"}) def test_correct_grades_and_bottom_numbers(self): program = baker.make(Program) @@ -425,11 +425,11 @@ def test_correct_grades_and_bottom_numbers(self): sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) - self.assertEqual(sheet.row_values(5)[1], 2.0) # question 1 average - self.assertEqual(sheet.row_values(8)[1], 3.0) # question 2 average - self.assertEqual(sheet.row_values(10)[1], 2.5) # Average grade - self.assertEqual(sheet.row_values(11)[1], "5/10") # Voters / Participants - self.assertEqual(sheet.row_values(12)[1], "50%") # Voter percentage + self.assertEqual(sheet.row_values(5)[2], 2.0) # question 1 average + self.assertEqual(sheet.row_values(8)[2], 3.0) # question 2 average + 
self.assertEqual(sheet.row_values(10)[2], 2.5) # Average grade + self.assertEqual(sheet.row_values(11)[2], "5/10") # Voters / Participants + self.assertEqual(sheet.row_values(12)[2], "50%") # Voter percentage def test_course_grade(self): program = baker.make(Program) @@ -457,9 +457,9 @@ def test_course_grade(self): cache_results(evaluation) sheet = self.get_export_sheet(course.semester, program, [course.type.id]) - self.assertEqual(sheet.row_values(12)[1], expected_average) self.assertEqual(sheet.row_values(12)[2], expected_average) self.assertEqual(sheet.row_values(12)[3], expected_average) + self.assertEqual(sheet.row_values(12)[4], expected_average) def test_yes_no_question_result(self): program = baker.make(Program) @@ -480,7 +480,7 @@ def test_yes_no_question_result(self): sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) self.assertEqual(sheet.row_values(5)[0], question.text) - self.assertEqual(sheet.row_values(5)[1], "67%") + self.assertEqual(sheet.row_values(5)[2], "67%") def test_contributor_result_export(self): program = baker.make(Program) @@ -527,24 +527,24 @@ def test_contributor_result_export(self): workbook = xlrd.open_workbook(file_contents=binary_content) self.assertEqual( - workbook.sheets()[0].row_values(0)[1], + workbook.sheets()[0].row_values(0)[2], f"{evaluation_1.full_name}\n{evaluation_1.course.semester.name}\n{contributor.full_name}", ) self.assertEqual( - workbook.sheets()[0].row_values(0)[2], + workbook.sheets()[0].row_values(0)[3], f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}", ) self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.public_name) self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text) - self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0) + self.assertEqual(workbook.sheets()[0].row_values(5)[3], 4.0) self.assertEqual( workbook.sheets()[0].row_values(7)[0], f"{contributor_questionnaire.public_name} ({contributor.full_name})", ) self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text) - self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0) + self.assertEqual(workbook.sheets()[0].row_values(8)[3], 3.0) self.assertEqual(workbook.sheets()[0].row_values(10)[0], "Overall Average Grade") - self.assertEqual(workbook.sheets()[0].row_values(10)[2], 3.25) + self.assertEqual(workbook.sheets()[0].row_values(10)[3], 3.25) def test_text_answer_export(self): evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, can_publish_text_results=True) diff --git a/evap/staff/views.py b/evap/staff/views.py index ca74d87139..4a0fcd2c41 100644 --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -5,7 +5,7 @@ from dataclasses import dataclass from datetime import date, datetime from enum import Enum -from typing import Any, Final, Literal, cast +from typing import Any, Literal, cast import openpyxl from django.conf import settings @@ -1551,7 +1551,9 @@ def evaluation_person_management(request, evaluation_id: int): raise SuspiciousOperation("Invalid POST operation") import_action = ImportAction.from_operation(operation) - import_type: Final = ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR + import_type: Literal[ImportType.PARTICIPANT, ImportType.CONTRIBUTOR] = ( + ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR + ) excel_form = participant_excel_form if "participants" in operation else contributor_excel_form 
copy_form = participant_copy_form if "participants" in operation else contributor_copy_form From 67c32760baab057660dcc60d61d40b7f37ca318e Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:03:24 +0100 Subject: [PATCH 03/24] add tests for results exporter average --- evap/results/tests/test_exporters.py | 157 +++++++++++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 87234be0fa..6f02a77822 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -589,3 +589,160 @@ def test_text_answer_export(self): self.assertEqual(sheet.row_values(3)[0], questions[0].text) self.assertEqual(sheet.row_values(5)[0], questions[1].text) self.assertEqual(sheet.row_values(6)[0], questions[2].text) + + def test_total_average(self): + program = baker.make(Program) + + questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) + + question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire) + + evaluation_1 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation_2 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation_1.general_contribution.questionnaires.set([questionnaire]) + + make_rating_answer_counters(question, evaluation_1.general_contribution) + + evaluation_2.general_contribution.questionnaires.set([questionnaire]) + + make_rating_answer_counters(question, evaluation_2.general_contribution) + + cache_results(evaluation_1) + cache_results(evaluation_2) + + binary_content = BytesIO() + ResultsExporter().export( + binary_content, + [evaluation_1.course.semester, evaluation_2.course.semester], + [ + ( + [course_program.id for course_program in evaluation_1.course.programs.all()] + + [course_program.id for course_program in evaluation_2.course.programs.all()], + [evaluation_1.course.type.id, evaluation_2.course.type.id], + ) + ], + True, + True, + ) + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + + self.assertEqual( + float(workbook.sheets()[0].row_values(5)[1]), + (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2, + ) + + self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") + + def test_not_all_contributions_are_created_equal(self): + program = baker.make(Program) + + questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) + questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP) + + question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_1) + question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2) + + evaluation_1 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation_2 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation_1.general_contribution.questionnaires.set([questionnaire_1]) + + make_rating_answer_counters(question_1, evaluation_1.general_contribution) + + 
evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2]) + + make_rating_answer_counters(question_1, evaluation_2.general_contribution) + make_rating_answer_counters(question_2, evaluation_2.general_contribution) + + cache_results(evaluation_1) + cache_results(evaluation_2) + + binary_content = BytesIO() + ResultsExporter().export( + binary_content, + [evaluation_1.course.semester, evaluation_2.course.semester], + [ + ( + [course_program.id for course_program in evaluation_1.course.programs.all()] + + [course_program.id for course_program in evaluation_2.course.programs.all()], + [evaluation_1.course.type.id, evaluation_2.course.type.id], + ) + ], + True, + True, + ) + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + + self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) + self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) + + self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") + + def test_only_export_averages(self): + program = baker.make(Program) + + questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) + + question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire) + + evaluation = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation.general_contribution.questionnaires.set([questionnaire]) + + make_rating_answer_counters(question, evaluation.general_contribution) + + cache_results(evaluation) + + binary_content = BytesIO() + ResultsExporter().export( + binary_content, + [], + [ + ( + [], + [], + ) + ], + True, + True, + ) + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + + self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") + self.assertEqual(workbook.sheets()[0].row_values(5)[1], 1.0) From 676bd402c6fbbdb5bacd1ba350a0b7922bdf6a65 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 27 Jan 2025 19:25:44 +0100 Subject: [PATCH 04/24] adress comments --- evap/results/exporters.py | 52 +++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 535081ecf6..ee525dd6e2 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -203,7 +203,7 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") - self.write_cell("Average for this Question", "evaluation") + self.write_cell(_("Average for this Question"), "evaluation") for evaluation, __ in evaluations_with_results: title = evaluation.full_name @@ -226,7 +226,7 @@ def write_headings_and_evaluation_info( self.write_cell(evaluation.course.type.name, "border_left_right") self.next_row() - # One more cell is needed for the question column + # One column for the question, one column for the average, n columns for the evaluations self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) def write_overall_results( @@ -259,7 +259,7 @@ def write_overall_results( # Only query the number of evaluations once and keep track of it here. count_gt_1: list[bool] = [e.course_evaluations_count > 1 for e in annotated_evaluations] - # Borders only if there is a course grade below. 
Offset by one column + # Borders only if there is a course grade below. Offset by one column for column title and one for average self.write_empty_row_with_styles( ["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1] ) @@ -293,7 +293,7 @@ def write_overall_results( def _calculate_display_result( cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]] ) -> tuple[float | None, float | None]: - values = [] + value_sum = 0.0 count_sum = 0 approval_count = 0 @@ -301,18 +301,18 @@ def _calculate_display_result( if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): continue - values.append(grade_result.average * grade_result.count_sum) + value_sum += grade_result.average * grade_result.count_sum count_sum += grade_result.count_sum if grade_result.question.is_yes_no_question: approval_count += grade_result.approval_count - if not values: + if not value_sum: return None, None - avg = sum(values) / count_sum + avg = value_sum / count_sum if question.is_yes_no_question: - percent_approval = approval_count / count_sum if count_sum > 0 else 0 - return avg, percent_approval + average_approval_ratio = approval_count / count_sum if count_sum > 0 else 0 + return avg, average_approval_ratio return avg, None @classmethod @@ -322,26 +322,26 @@ def _calculate_display_result_average( questionnaire_id: int, question: Question, ) -> tuple[float | None, float | None]: - avg_values = [] + avg_value_sum = 0.0 count_avg = 0 - avg_approval = [] + avg_approval_sum = 0.0 count_approval = 0 for __, results in evaluations_with_results: if ( results.get(questionnaire_id) is None - ): # ignore all results without the questionaire for average calculation + ): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire continue - avg, percent_approval = cls._calculate_display_result(questionnaire_id, question, results) + avg, average_approval_ratio = cls._calculate_display_result(questionnaire_id, question, results) if avg is not None: - avg_values.append(avg) + avg_value_sum += avg count_avg += 1 - if percent_approval is not None: - avg_approval.append(percent_approval) + if average_approval_ratio is not None: + avg_approval_sum += average_approval_ratio count_approval += 1 - return sum(avg_values) / count_avg if count_avg else None, ( - sum(avg_approval) / count_approval if count_approval else None + return avg_value_sum / count_avg if count_avg else None, ( + avg_approval_sum / count_approval if count_approval else None ) def write_questionnaire( @@ -364,15 +364,13 @@ def write_questionnaire( for question in self.filter_text_and_heading_questions(questionnaire.questions.all()): self.write_cell(question.text, "italic" if question.is_heading_question else "default") - question_average, question_approval_count = self._calculate_display_result_average( + average_grade, approval_ratio = self._calculate_display_result_average( all_evaluations_with_results, questionnaire.id, question ) - - if question_average is not None: - if question.is_yes_no_question: - self.write_cell(f"{question_approval_count:.0%}", self.grade_to_style(question_average)) - else: - self.write_cell(question_average, self.grade_to_style(question_average)) + if approval_ratio is not None and average_grade is not None: + self.write_cell(f"{approval_ratio:.0%}", self.grade_to_style(average_grade)) + elif average_grade is not None: + self.write_cell(average_grade, 
self.grade_to_style(average_grade)) else: self.write_cell("", "border_left_right") @@ -382,14 +380,14 @@ def write_questionnaire( self.write_cell(style="border_left_right") continue - avg, percent_approval = self._calculate_display_result(questionnaire.id, question, results) + avg, average_approval_ratio = self._calculate_display_result(questionnaire.id, question, results) if avg is None: self.write_cell(style="border_left_right") continue if question.is_yes_no_question: - self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg)) + self.write_cell(f"{average_approval_ratio:.0%}", self.grade_to_style(avg)) else: self.write_cell(avg, self.grade_to_style(avg)) self.next_row() From 3bc8814b64da3ef047cab3e4ae12b5a250ccbd83 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 17 Feb 2025 17:54:46 +0100 Subject: [PATCH 05/24] revert to tuple --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index ee525dd6e2..49ce7c7401 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -266,7 +266,7 @@ def write_overall_results( self.write_cell(_("Evaluation weight"), "bold") self.write_cell("") - weight_percentages = tuple( + weight_percentages = ( f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True) ) From a5eedbfb5334f945fb40b414cd81fe0383fe3d6d Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 17 Feb 2025 18:27:37 +0100 Subject: [PATCH 06/24] rename functions --- evap/results/exporters.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 49ce7c7401..e79e6ef1c9 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -290,7 +290,7 @@ def write_overall_results( ) @classmethod - def _calculate_display_result( + def _get_average_grade_and_approval_ratio( cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]] ) -> tuple[float | None, float | None]: value_sum = 0.0 @@ -316,7 +316,7 @@ def _calculate_display_result( return avg, None @classmethod - def _calculate_display_result_average( + def _get_average_of_average_grade_and_approval_ratio( cls, evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], questionnaire_id: int, @@ -332,7 +332,7 @@ def _calculate_display_result_average( results.get(questionnaire_id) is None ): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire continue - avg, average_approval_ratio = cls._calculate_display_result(questionnaire_id, question, results) + avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results) if avg is not None: avg_value_sum += avg count_avg += 1 @@ -364,7 +364,7 @@ def write_questionnaire( for question in self.filter_text_and_heading_questions(questionnaire.questions.all()): self.write_cell(question.text, "italic" if question.is_heading_question else "default") - average_grade, approval_ratio = self._calculate_display_result_average( + average_grade, approval_ratio = self._get_average_of_average_grade_and_approval_ratio( all_evaluations_with_results, questionnaire.id, question ) if approval_ratio is not None and average_grade is not None: @@ -380,7 +380,9 @@ def write_questionnaire( 
self.write_cell(style="border_left_right") continue - avg, average_approval_ratio = self._calculate_display_result(questionnaire.id, question, results) + avg, average_approval_ratio = self._get_average_grade_and_approval_ratio( + questionnaire.id, question, results + ) if avg is None: self.write_cell(style="border_left_right") From 1bdb5c9a60c378a9ad2e10562eb6e3ef32cb09a8 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 17 Feb 2025 18:36:33 +0100 Subject: [PATCH 07/24] add style --- evap/evaluation/tools.py | 1 + evap/results/exporters.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py index 825e7078cd..5c162166b0 100644 --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -228,6 +228,7 @@ def content(self, value): class ExcelExporter(ABC): styles = { "default": xlwt.Style.default_style, + "missing_average": xlwt.Style.default_style, "headline": xlwt.easyxf( "font: bold on, height 400; alignment: horiz centre, vert centre, wrap on; borders: bottom medium", num_format_str="0.0", diff --git a/evap/results/exporters.py b/evap/results/exporters.py index e79e6ef1c9..9c79b1a692 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -265,7 +265,7 @@ def write_overall_results( ) self.write_cell(_("Evaluation weight"), "bold") - self.write_cell("") + self.write_cell("", "missing_average") weight_percentages = ( f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True) @@ -273,7 +273,7 @@ def write_overall_results( self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default") self.write_cell(_("Course Grade"), "bold") - self.write_cell("") + self.write_cell("", "missing_average") for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True): if not gt1: self.write_cell() From a697d21b75fc9ed07c76c59277d1cd55b5470c9d Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 17 Feb 2025 20:06:21 +0100 Subject: [PATCH 08/24] remove useless test --- evap/results/exporters.py | 8 +++--- evap/results/tests/test_exporters.py | 40 ---------------------------- 2 files changed, 4 insertions(+), 44 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 9c79b1a692..7e835abcf6 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -126,13 +126,13 @@ def filter_evaluations( # noqa: PLR0912 used_questionnaires: set[Questionnaire] = set() evaluations_filter = Q() - if semesters: + if semesters is not None: evaluations_filter &= Q(course__semester__in=semesters) - if evaluation_states: + if evaluation_states is not None: evaluations_filter &= Q(state__in=evaluation_states) - if program_ids: + if program_ids is not None: evaluations_filter &= Q(course__programs__in=program_ids) - if course_type_ids: + if course_type_ids is not None: evaluations_filter &= Q(course__type__in=course_type_ids) if contributor: diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 6f02a77822..cb123a40e0 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -706,43 +706,3 @@ def test_not_all_contributions_are_created_equal(self): self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") - - def test_only_export_averages(self): - program = 
baker.make(Program) - - questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) - - question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire) - - evaluation = baker.make( - Evaluation, - course__programs=[program], - state=Evaluation.State.PUBLISHED, - _participant_count=2, - _voter_count=2, - ) - - evaluation.general_contribution.questionnaires.set([questionnaire]) - - make_rating_answer_counters(question, evaluation.general_contribution) - - cache_results(evaluation) - - binary_content = BytesIO() - ResultsExporter().export( - binary_content, - [], - [ - ( - [], - [], - ) - ], - True, - True, - ) - binary_content.seek(0) - workbook = xlrd.open_workbook(file_contents=binary_content.read()) - - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") - self.assertEqual(workbook.sheets()[0].row_values(5)[1], 1.0) From f618d370339d9eee5b2f046eeb8628abe87a143e Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 10 Mar 2025 18:36:45 +0100 Subject: [PATCH 09/24] rephrase --- evap/results/exporters.py | 2 +- evap/results/tests/test_exporters.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 7e835abcf6..5fe4e9f06a 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -203,7 +203,7 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") - self.write_cell(_("Average for this Question"), "evaluation") + self.write_cell(_("Average for this question over all evaluations in all published semesters"), "evaluation") for evaluation, __ in evaluations_with_results: title = evaluation.full_name diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index cb123a40e0..222890a5cb 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -646,7 +646,10 @@ def test_total_average(self): (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2, ) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") + self.assertEqual( + workbook.sheets()[0].row_values(0)[1], + "Average for this question over all evaluations in all published semesters", + ) def test_not_all_contributions_are_created_equal(self): program = baker.make(Program) @@ -705,4 +708,7 @@ def test_not_all_contributions_are_created_equal(self): self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "Average for this Question") + self.assertEqual( + workbook.sheets()[0].row_values(0)[1], + "Average for this question over all evaluations in all published semesters", + ) From ca1c44f6e9afd239e82e34f71326a4090cccfe1a Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 10 Mar 2025 20:25:58 +0100 Subject: [PATCH 10/24] fix test --- evap/results/exporters.py | 5 ++++- evap/results/tests/test_exporters.py | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 5fe4e9f06a..f585add412 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -39,6 +39,9 @@ class ResultsExporter(ExcelExporter): "evaluation": xlwt.easyxf( "alignment: horiz 
centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium" ), + "average": xlwt.easyxf( + "alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium; font: italic on" + ), "total_voters": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), "evaluation_rate": xlwt.easyxf("alignment: horiz centre; borders: left medium, bottom medium, right medium"), "evaluation_weight": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), @@ -203,7 +206,7 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") - self.write_cell(_("Average for this question over all evaluations in all published semesters"), "evaluation") + self.write_cell(_("Average for this question over all evaluations in all published semesters"), "average") for evaluation, __ in evaluations_with_results: title = evaluation.full_name diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 222890a5cb..ba86e9543a 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -595,7 +595,7 @@ def test_total_average(self): questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) - question = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire) + question = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire) evaluation_1 = baker.make( Evaluation, @@ -609,17 +609,17 @@ def test_total_average(self): Evaluation, course__programs=[program], state=Evaluation.State.PUBLISHED, - _participant_count=2, - _voter_count=2, + _participant_count=3, + _voter_count=3, ) evaluation_1.general_contribution.questionnaires.set([questionnaire]) - make_rating_answer_counters(question, evaluation_1.general_contribution) + make_rating_answer_counters(question, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) evaluation_2.general_contribution.questionnaires.set([questionnaire]) - make_rating_answer_counters(question, evaluation_2.general_contribution) + make_rating_answer_counters(question, evaluation_2.general_contribution, [1, 2, 0, 0, 0]) cache_results(evaluation_1) cache_results(evaluation_2) @@ -641,7 +641,7 @@ def test_total_average(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual( + self.assertAlmostEqual( float(workbook.sheets()[0].row_values(5)[1]), (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2, ) @@ -706,7 +706,7 @@ def test_not_all_contributions_are_created_equal(self): workbook = xlrd.open_workbook(file_contents=binary_content.read()) self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) - self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) + self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) # testing empty heading for average self.assertEqual( workbook.sheets()[0].row_values(0)[1], From d61be22a09ed609c662f17403d7df6eac9e3f5ce Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 24 Mar 2025 18:21:44 +0100 Subject: [PATCH 11/24] revert type --- evap/staff/views.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/evap/staff/views.py b/evap/staff/views.py index 4a0fcd2c41..ca74d87139 100644 --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -5,7 +5,7 @@ from dataclasses import dataclass from 
datetime import date, datetime from enum import Enum -from typing import Any, Literal, cast +from typing import Any, Final, Literal, cast import openpyxl from django.conf import settings @@ -1551,9 +1551,7 @@ def evaluation_person_management(request, evaluation_id: int): raise SuspiciousOperation("Invalid POST operation") import_action = ImportAction.from_operation(operation) - import_type: Literal[ImportType.PARTICIPANT, ImportType.CONTRIBUTOR] = ( - ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR - ) + import_type: Final = ImportType.PARTICIPANT if "participants" in operation else ImportType.CONTRIBUTOR excel_form = participant_excel_form if "participants" in operation else contributor_excel_form copy_form = participant_copy_form if "participants" in operation else contributor_copy_form From 4ae2571ba7f31abedb97879fbc2ad7722c79878f Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 24 Mar 2025 19:01:53 +0100 Subject: [PATCH 12/24] use hard coded in test --- evap/results/tests/test_exporters.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index ba86e9543a..48f47e57e2 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -641,10 +641,7 @@ def test_total_average(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertAlmostEqual( - float(workbook.sheets()[0].row_values(5)[1]), - (float(workbook.sheets()[0].row_values(5)[2]) + float(workbook.sheets()[0].row_values(5)[3])) / 2, - ) + self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335) self.assertEqual( workbook.sheets()[0].row_values(0)[1], From eb96a66749ac189e3b3fef59c3bd2e1671871777 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 24 Mar 2025 20:07:10 +0100 Subject: [PATCH 13/24] merge tests --- evap/results/exporters.py | 8 +-- evap/results/tests/test_exporters.py | 76 ++++------------------------ 2 files changed, 15 insertions(+), 69 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index f585add412..3c465540b9 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -333,7 +333,7 @@ def _get_average_of_average_grade_and_approval_ratio( for __, results in evaluations_with_results: if ( results.get(questionnaire_id) is None - ): # we iterate over all distinct questionaires from all evaluations but some evaluations do not include a specific questionaire + ): # we iterate over all distinct questionnaires from all evaluations but some evaluations do not include a specific questionnaire continue avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results) if avg is not None: @@ -343,9 +343,9 @@ def _get_average_of_average_grade_and_approval_ratio( avg_approval_sum += average_approval_ratio count_approval += 1 - return avg_value_sum / count_avg if count_avg else None, ( - avg_approval_sum / count_approval if count_approval else None - ) + avg_value = avg_value_sum / count_avg if count_avg else None + avg_approval = avg_approval_sum / count_approval if count_approval else None + return avg_value, avg_approval def write_questionnaire( self, diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 48f47e57e2..4f1584d84f 100644 --- a/evap/results/tests/test_exporters.py 
+++ b/evap/results/tests/test_exporters.py @@ -593,68 +593,10 @@ def test_text_answer_export(self): def test_total_average(self): program = baker.make(Program) - questionnaire = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) - - question = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire) - - evaluation_1 = baker.make( - Evaluation, - course__programs=[program], - state=Evaluation.State.PUBLISHED, - _participant_count=2, - _voter_count=2, - ) - - evaluation_2 = baker.make( - Evaluation, - course__programs=[program], - state=Evaluation.State.PUBLISHED, - _participant_count=3, - _voter_count=3, - ) - - evaluation_1.general_contribution.questionnaires.set([questionnaire]) - - make_rating_answer_counters(question, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) - - evaluation_2.general_contribution.questionnaires.set([questionnaire]) - - make_rating_answer_counters(question, evaluation_2.general_contribution, [1, 2, 0, 0, 0]) - - cache_results(evaluation_1) - cache_results(evaluation_2) - - binary_content = BytesIO() - ResultsExporter().export( - binary_content, - [evaluation_1.course.semester, evaluation_2.course.semester], - [ - ( - [course_program.id for course_program in evaluation_1.course.programs.all()] - + [course_program.id for course_program in evaluation_2.course.programs.all()], - [evaluation_1.course.type.id, evaluation_2.course.type.id], - ) - ], - True, - True, - ) - binary_content.seek(0) - workbook = xlrd.open_workbook(file_contents=binary_content.read()) - - self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335) - - self.assertEqual( - workbook.sheets()[0].row_values(0)[1], - "Average for this question over all evaluations in all published semesters", - ) - - def test_not_all_contributions_are_created_equal(self): - program = baker.make(Program) - questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP) - question_1 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_1) + question_1 = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire_1) question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2) evaluation_1 = baker.make( @@ -669,17 +611,17 @@ def test_not_all_contributions_are_created_equal(self): Evaluation, course__programs=[program], state=Evaluation.State.PUBLISHED, - _participant_count=2, - _voter_count=2, + _participant_count=3, + _voter_count=3, ) evaluation_1.general_contribution.questionnaires.set([questionnaire_1]) - make_rating_answer_counters(question_1, evaluation_1.general_contribution) + make_rating_answer_counters(question_1, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2]) - make_rating_answer_counters(question_1, evaluation_2.general_contribution) + make_rating_answer_counters(question_1, evaluation_2.general_contribution, [1, 2, 0, 0, 0]) make_rating_answer_counters(question_2, evaluation_2.general_contribution) cache_results(evaluation_1) @@ -702,10 +644,14 @@ def test_not_all_contributions_are_created_equal(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) - self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) # testing empty 
heading for average + self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335) self.assertEqual( workbook.sheets()[0].row_values(0)[1], "Average for this question over all evaluations in all published semesters", ) + + # average for second questionnaire must be the value from evaluation2 (since evaluation1 doesn't have the questionnaire) + self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) + # average field next to the questionnaire title must be empty + self.assertEqual("", workbook.sheets()[0].row_values(8)[2]) From 8a736d0713823104b69910edfbf23cfe49ab1ce4 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 19 May 2025 18:31:37 +0200 Subject: [PATCH 14/24] revert changes --- evap/evaluation/models.py | 9 +++++++++ evap/evaluation/tests/test_models.py | 1 + evap/results/tools.py | 4 ++-- .../templates/staff_evaluation_textanswers_section.html | 3 +++ evap/staff/views.py | 1 + 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/evap/evaluation/models.py b/evap/evaluation/models.py index e9b3e8aaee..8e95bf5881 100644 --- a/evap/evaluation/models.py +++ b/evap/evaluation/models.py @@ -1528,6 +1528,7 @@ class ReviewDecision(models.TextChoices): """ PUBLIC = "PU", _("public") + PRIVATE = "PR", _("private") # This answer should only be displayed to the contributor the question was about DELETED = "DE", _("deleted") UNDECIDED = "UN", _("undecided") @@ -1556,6 +1557,10 @@ class Meta: def will_be_deleted(self): return self.review_decision == self.ReviewDecision.DELETED + @property + def will_be_private(self): + return self.review_decision == self.ReviewDecision.PRIVATE + @property def will_be_public(self): return self.review_decision == self.ReviewDecision.PUBLIC @@ -1566,6 +1571,10 @@ def will_be_public(self): def is_public(self): return self.will_be_public + @property + def is_private(self): + return self.will_be_private + @property def is_reviewed(self): return self.review_decision != self.ReviewDecision.UNDECIDED diff --git a/evap/evaluation/tests/test_models.py b/evap/evaluation/tests/test_models.py index 70442cf870..e74f873c6e 100644 --- a/evap/evaluation/tests/test_models.py +++ b/evap/evaluation/tests/test_models.py @@ -366,6 +366,7 @@ def test_textanswers_to_delete_get_deleted_on_publish(self): [ TextAnswer.ReviewDecision.DELETED, TextAnswer.ReviewDecision.PUBLIC, + TextAnswer.ReviewDecision.PRIVATE, ] ), _quantity=3, diff --git a/evap/results/tools.py b/evap/results/tools.py index 9b2e02b6d4..e4f7abf43b 100644 --- a/evap/results/tools.py +++ b/evap/results/tools.py @@ -222,7 +222,7 @@ def _get_results_impl(evaluation: Evaluation, *, refetch_related_objects: bool = ((textanswer.contribution_id, textanswer.question_id), textanswer) for contribution in evaluation.contributions.all() for textanswer in contribution.textanswer_set.all() - if textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC + if textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC] ) racs_per_contribution_question: dict[tuple[int, int], list[RatingAnswerCounter]] = unordered_groupby( @@ -479,7 +479,7 @@ def can_textanswer_be_seen_by( # noqa: PLR0911 textanswer: TextAnswer, view: str, ) -> bool: - assert textanswer.review_decision == TextAnswer.ReviewDecision.PUBLIC + assert textanswer.review_decision in [TextAnswer.ReviewDecision.PRIVATE, TextAnswer.ReviewDecision.PUBLIC] contributor = textanswer.contribution.contributor if view == 
"public": diff --git a/evap/staff/templates/staff_evaluation_textanswers_section.html b/evap/staff/templates/staff_evaluation_textanswers_section.html index 1dba4c445a..f171cc0041 100644 --- a/evap/staff/templates/staff_evaluation_textanswers_section.html +++ b/evap/staff/templates/staff_evaluation_textanswers_section.html @@ -39,6 +39,9 @@ + + + diff --git a/evap/staff/views.py b/evap/staff/views.py index ca74d87139..16e58022c7 100644 --- a/evap/staff/views.py +++ b/evap/staff/views.py @@ -1753,6 +1753,7 @@ def evaluation_textanswers_update_publish(request): review_decision_for_action = { "publish": TextAnswer.ReviewDecision.PUBLIC, + "make_private": TextAnswer.ReviewDecision.PRIVATE, "delete": TextAnswer.ReviewDecision.DELETED, "unreview": TextAnswer.ReviewDecision.UNDECIDED, } From b3adec6462dfef165cbff05e40de10d6b9f73391 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 20 Oct 2025 19:22:42 +0200 Subject: [PATCH 15/24] Update evap/results/exporters.py Co-authored-by: Niklas Mohrin --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 3c465540b9..2271424bdb 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -218,7 +218,7 @@ def write_headings_and_evaluation_info( self.next_row() self.write_cell(_("Programs"), "bold") - self.write_cell("", "program") + self.write_cell("", "program") # empty cell in grade-average column for evaluation, __ in evaluations_with_results: self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program") From f93ee5b73aeae605ce7bf99a79ac4e67d1bee99f Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 20 Oct 2025 19:25:34 +0200 Subject: [PATCH 16/24] Update evap/results/exporters.py Co-authored-by: Niklas Mohrin --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 2271424bdb..898f01c8d3 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -206,7 +206,7 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") - self.write_cell(_("Average for this question over all evaluations in all published semesters"), "average") + self.write_cell(_("Average result for this question over all published evaluations in all semesters"), "average") for evaluation, __ in evaluations_with_results: title = evaluation.full_name From 3163bd688fea30d8a3c352f6d60c18765cce1e3d Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 20 Oct 2025 19:25:45 +0200 Subject: [PATCH 17/24] Update evap/results/exporters.py Co-authored-by: Niklas Mohrin --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 898f01c8d3..ef74cee6a6 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -224,7 +224,7 @@ def write_headings_and_evaluation_info( self.next_row() self.write_cell(_("Course Type"), "bold") - self.write_cell("", "program") + self.write_cell("", "border_left_right") # empty cell in grade-average column for evaluation, __ in evaluations_with_results: self.write_cell(evaluation.course.type.name, "border_left_right") From fc91dbce35792b8c03ae4a083dd051bee98da926 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: 
Mon, 20 Oct 2025 20:36:16 +0200 Subject: [PATCH 18/24] Update evap/results/exporters.py Co-authored-by: Niklas Mohrin --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index b7a882eb3e..4208aaa3d1 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -304,7 +304,7 @@ def _get_average_grade_and_approval_ratio( value_sum += grade_result.average * grade_result.count_sum count_sum += grade_result.count_sum - if grade_result.question.is_yes_no_question: + if question.is_yes_no_question: approval_count += grade_result.approval_count if not value_sum: From 648cd143a88274ee00e4e0b8d02cbdce3600a159 Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 20 Oct 2025 20:36:34 +0200 Subject: [PATCH 19/24] Update evap/results/exporters.py Co-authored-by: Niklas Mohrin --- evap/results/exporters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 4208aaa3d1..e1a845ac0f 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -307,7 +307,7 @@ def _get_average_grade_and_approval_ratio( if question.is_yes_no_question: approval_count += grade_result.approval_count - if not value_sum: + if not count_sum: return None, None avg = value_sum / count_sum From a65875160232937456c526042eaa99bc85a2be1d Mon Sep 17 00:00:00 2001 From: fidoriel <49869342+fidoriel@users.noreply.github.com> Date: Mon, 20 Oct 2025 20:55:11 +0200 Subject: [PATCH 20/24] make it work with recent changes --- evap/results/exporters.py | 4 +++- evap/results/tests/test_exporters.py | 13 +------------ 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index e1a845ac0f..bc30f6713d 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -204,7 +204,9 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") - self.write_cell(_("Average result for this question over all published evaluations in all semesters"), "average") + self.write_cell( + _("Average result for this question over all published evaluations in all semesters"), "average" + ) for evaluation, __ in evaluations_with_results: title = evaluation.full_name diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 4f1584d84f..bd94da8efa 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -341,17 +341,6 @@ def test_no_program_or_course_type(self): with self.assertRaises(AssertionError): ResultsExporter().export(BytesIO(), [evaluation.course.semester], []) - def test_exclude_single_result(self): - program = baker.make(Program) - evaluation = baker.make( - Evaluation, is_single_result=True, state=Evaluation.State.PUBLISHED, course__programs=[program] - ) - cache_results(evaluation) - sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) - self.assertEqual( - len(sheet.row_values(0)), 2, "There should be no column for the evaluation, only the row description" - ) - def test_exclude_used_but_unanswered_questionnaires(self): program = baker.make(Program) evaluation = baker.make( @@ -648,7 +637,7 @@ def test_total_average(self): self.assertEqual( workbook.sheets()[0].row_values(0)[1], - "Average for this question over all evaluations in all published semesters", + "Average result for this question over all 
published evaluations in all semesters", ) # average for second questionnaire must be the value from evaluation2 (since evaluation1 doesn't have the questionnaire) From 1e6065b239f8b4efd0ec5741973ffbc1394f3d7d Mon Sep 17 00:00:00 2001 From: Pipifax <37810842+Belissimo-T@users.noreply.github.com> Date: Mon, 30 Mar 2026 21:06:26 +0200 Subject: [PATCH 21/24] move missing_average style from ExcelExporter to ResultsExporter --- evap/evaluation/tools.py | 1 - evap/results/exporters.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py index e3f0c8fe7e..8ff1cb777b 100644 --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -295,7 +295,6 @@ def content(self, value): class ExcelExporter(ABC): styles = { "default": xlwt.Style.default_style, - "missing_average": xlwt.Style.default_style, "headline": xlwt.easyxf( "font: bold on, height 400; alignment: horiz centre, vert centre, wrap on; borders: bottom medium", num_format_str="0.0", diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 53da65d366..914d9e3f50 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -44,6 +44,7 @@ class ResultsExporter(ExcelExporter): "average": xlwt.easyxf( "alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium; font: italic on" ), + "missing_average": xlwt.Style.default_style, "total_voters": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), "evaluation_rate": xlwt.easyxf("alignment: horiz centre; borders: left medium, bottom medium, right medium"), "evaluation_weight": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), From 371d934d7b63f2d3828af109bc6af5bc4302f165 Mon Sep 17 00:00:00 2001 From: Pipifax <37810842+Belissimo-T@users.noreply.github.com> Date: Mon, 30 Mar 2026 21:06:59 +0200 Subject: [PATCH 22/24] simplify _get_average_grade_and_approval_ratio arguments --- evap/results/exporters.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 914d9e3f50..4b3e42adbd 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -297,13 +297,13 @@ def write_overall_results( @classmethod def _get_average_grade_and_approval_ratio( - cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]] + cls, question: Question, results: list[QuestionResult] ) -> tuple[float | None, float | None]: value_sum = 0.0 count_sum = 0 approval_count = 0 - for grade_result in results[questionnaire_id]: + for grade_result in results: if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): continue @@ -338,7 +338,7 @@ def _get_average_of_average_grade_and_approval_ratio( results.get(questionnaire_id) is None ): # we iterate over all distinct questionnaires from all evaluations but some evaluations do not include a specific questionnaire continue - avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results) + avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(question, results[questionnaire_id]) if avg is not None: avg_value_sum += avg count_avg += 1 @@ -387,7 +387,7 @@ def write_questionnaire( continue avg, average_approval_ratio = self._get_average_grade_and_approval_ratio( - questionnaire.id, question, results + question, results[questionnaire.id] ) if avg is None: From 
cb004107359b7ef06e9b1ad25c8b6e2909c96670 Mon Sep 17 00:00:00 2001 From: Pipifax <37810842+Belissimo-T@users.noreply.github.com> Date: Tue, 21 Apr 2026 00:49:10 +0200 Subject: [PATCH 23/24] refactor including averages in eval export --- evap/evaluation/tools.py | 5 + evap/results/exporters.py | 217 ++++++++++-------- .../templates/distribution_widget.html | 2 +- evap/results/tests/test_tools.py | 4 +- evap/results/tools.py | 6 +- 5 files changed, 129 insertions(+), 105 deletions(-) diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py index 8ff1cb777b..ae15d5d592 100644 --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -333,6 +333,11 @@ def next_row(self) -> None: self.cur_col = 0 self.cur_row += 1 + def next_sheet(self, sheetname: str): + self.cur_sheet = self.workbook.add_sheet(sheetname) + self.cur_row = 0 + self.cur_col = 0 + def write_row[CV: CellValue]( self, vals: Iterable[CV], diff --git a/evap/results/exporters.py b/evap/results/exporters.py index 4b3e42adbd..142f4d767a 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -1,8 +1,8 @@ -import warnings -from collections import OrderedDict, defaultdict +import math +import typing +from collections import defaultdict from collections.abc import Iterable, Sequence from itertools import chain, repeat -from typing import Any, TypeVar import xlwt from django.db.models import Q, QuerySet @@ -24,13 +24,32 @@ get_results, ) -T = TypeVar("T", bound=Model) +T = typing.TypeVar("T", bound=Model) QuerySetOrSequence = QuerySet[T] | Sequence[T] -AnnotatedEvaluation = Any +AnnotatedEvaluation = typing.Any + + +class Averager: + def __init__(self): + self.sum = 0 + self.count = 0 + + def record_value(self, value: float, weight: float = 1): + if math.isnan(value): + return + + self.sum += value * weight + self.count += weight + + def current_average(self): + if self.count == 0: + return math.nan + + return self.sum / self.count class ResultsExporter(ExcelExporter): - CUSTOM_COLOR_START = 8 + CUSTOM_COLOR_PALETTE_START_INDEX = 8 NUM_GRADE_COLORS = 21 # 1.0 to 5.0 in 0.2 steps STEP = 0.2 # we only have a limited number of custom colors @@ -65,7 +84,7 @@ def grade_to_style(cls, grade: float) -> str: @classmethod def normalize_number(cls, number: float) -> float: - """floors 'number' to a multiply of cls.STEP""" + """floors number to a multiple of cls.STEP""" rounded_number = round(number, 1) # see #302 return round(int(rounded_number / cls.STEP + 0.0001) * cls.STEP, 1) @@ -79,23 +98,17 @@ def init_grade_styles(cls) -> None: Instances need the styles, but they should only be registered once for xlwt. """ - if len(cls.COLOR_MAPPINGS) > 0: - # Method has already been called (probably in another import of this file). - warnings.warn( - "ResultsExporter.init_grade_styles has been called, " - "although the styles have already been initialized. 
" - "This can happen, if the file is imported / run multiple " - "times in one application run.", - ImportWarning, - stacklevel=3, - ) - return + if cls.COLOR_MAPPINGS: + raise RuntimeError("ResultsExporter.init_grade_styles has been called twice.") - grade_base_style = "pattern: pattern solid, fore_colour {}; alignment: horiz centre; font: bold on; borders: left medium, right medium" + grade_base_style = ( + "pattern: pattern solid, fore_colour {}; alignment: horiz centre; font: bold on; " + "borders: left medium, right medium" + ) for i in range(cls.NUM_GRADE_COLORS): grade = 1 + i * cls.STEP color = get_grade_color(grade) - palette_index = cls.CUSTOM_COLOR_START + i + palette_index = cls.CUSTOM_COLOR_PALETTE_START_INDEX + i style_name = cls.grade_to_style(grade) color_name = style_name + "_color" xlwt.add_palette_colour(color_name, palette_index) @@ -118,14 +131,14 @@ def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Que return filtered_questions @staticmethod - def filter_evaluations( # noqa: PLR0912 - semesters: Iterable[Semester] | None = None, - evaluation_states: Iterable[Evaluation.State] | None = None, - program_ids: Iterable[int] | None = None, - course_type_ids: Iterable[int] | None = None, - contributor: UserProfile | None = None, + def filter_evaluations( + semesters: Iterable[Semester] | None, + evaluation_states: Iterable[Evaluation.State] | None, + program_ids: Iterable[int] | None, + course_type_ids: Iterable[int] | None, + contributor: UserProfile | None, include_not_enough_voters: bool = False, - ) -> tuple[list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], list[Questionnaire], bool]: + ) -> tuple[list[tuple[Evaluation, dict[int, list[QuestionResult]]]], list[Questionnaire], bool]: # pylint: disable=too-many-locals course_results_exist = False evaluations_with_results = [] @@ -140,31 +153,34 @@ def filter_evaluations( # noqa: PLR0912 evaluations_filter &= Q(course__programs__in=program_ids) if course_type_ids is not None: evaluations_filter &= Q(course__type__in=course_type_ids) - - if contributor: - evaluations_filter = evaluations_filter & ( - Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor]) + if contributor is not None: + evaluations_filter &= Q(course__responsibles__in=[contributor]) | Q( + contributions__contributor__in=[contributor] ) + evaluations = Evaluation.objects.filter(evaluations_filter).distinct() for evaluation in evaluations: if not evaluation.can_publish_rating_results and not include_not_enough_voters: continue - results: OrderedDict[int, list[QuestionResult]] = OrderedDict() + + results: dict[int, list[QuestionResult]] = defaultdict(list) for contribution_result in get_results(evaluation).contribution_results: for questionnaire_result in contribution_result.questionnaire_results: - # RatingQuestion.counts is a tuple of integers or None, if this tuple is all zero, we want to exclude it - question_results = questionnaire_result.question_results - if not any( - isinstance(question_result, AnsweredRatingResult) for question_result in question_results - ): + questionnaire_has_no_answered_results = all( + not isinstance(question_result, AnsweredRatingResult) + for question_result in questionnaire_result.question_results + ) + if questionnaire_has_no_answered_results: continue + if ( - not contributor + contributor is None or contribution_result.contributor is None or contribution_result.contributor == contributor ): - 
results.setdefault(questionnaire_result.questionnaire.id, []).extend(question_results) + results[questionnaire_result.questionnaire.id] += questionnaire_result.question_results used_questionnaires.add(questionnaire_result.questionnaire) + annotated_evaluation: AnnotatedEvaluation = evaluation annotated_evaluation.course_evaluations_count = annotated_evaluation.course.evaluations.count() if annotated_evaluation.course_evaluations_count > 1: @@ -185,7 +201,7 @@ def filter_evaluations( # noqa: PLR0912 def write_headings_and_evaluation_info( self, - evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + evaluations_with_results: list[tuple[Evaluation, dict[int, list[QuestionResult]]]], semesters: QuerySetOrSequence[Semester], contributor: UserProfile | None, programs: Iterable[int], @@ -197,6 +213,7 @@ def write_headings_and_evaluation_info( export_name += f"\n{contributor.full_name}" elif len(semesters) == 1: export_name += f"\n{semesters[0].name}" + if verbose_heading: program_names = [program.name for program in Program.objects.filter(pk__in=programs)] course_type_names = [course_type.name for course_type in CourseType.objects.filter(pk__in=course_types)] @@ -237,7 +254,7 @@ def write_headings_and_evaluation_info( def write_overall_results( self, - evaluations_with_results: list[tuple[AnnotatedEvaluation, OrderedDict[int, list[QuestionResult]]]], + evaluations_with_results: list[tuple[AnnotatedEvaluation, dict[int, list[QuestionResult]]]], course_results_exist: bool, ) -> None: annotated_evaluations = [e for e, __ in evaluations_with_results] @@ -296,89 +313,80 @@ def write_overall_results( ) @classmethod - def _get_average_grade_and_approval_ratio( + def _get_average_grade_and_approval( cls, question: Question, results: list[QuestionResult] - ) -> tuple[float | None, float | None]: - value_sum = 0.0 - count_sum = 0 - approval_count = 0 + ) -> tuple[float, float | None]: + grade_averager = Averager() + approval_averager = Averager() for grade_result in results: if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): continue - value_sum += grade_result.average * grade_result.count_sum - count_sum += grade_result.count_sum - if question.is_yes_no_question: - approval_count += grade_result.approval_count + grade_averager.record_value(grade_result.grade_average, weight=grade_result.count_sum) - if not count_sum: - return None, None + if question.is_yes_no_question: + approval_averager.record_value(grade_result.approval_average, weight=grade_result.count_sum) - avg = value_sum / count_sum if question.is_yes_no_question: - average_approval_ratio = approval_count / count_sum if count_sum > 0 else 0 - return avg, average_approval_ratio - return avg, None + return grade_averager.current_average(), approval_averager.current_average() + + return grade_averager.current_average(), None @classmethod - def _get_average_of_average_grade_and_approval_ratio( + def _get_average_of_average_grade_and_approval( cls, - evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + evaluations_with_results: list[tuple[Evaluation, dict[int, list[QuestionResult]]]], questionnaire_id: int, question: Question, - ) -> tuple[float | None, float | None]: - avg_value_sum = 0.0 - count_avg = 0 - avg_approval_sum = 0.0 - count_approval = 0 - - for __, results in evaluations_with_results: - if ( - results.get(questionnaire_id) is None - ): # we iterate over all distinct questionnaires from all evaluations but some 
evaluations do not include a specific questionnaire + ) -> tuple[float, float | None]: + average_grade_averager = Averager() + average_approval_averager = Averager() + + for __, results_by_questionnaire_id in evaluations_with_results: + if questionnaire_id not in results_by_questionnaire_id: continue - avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(question, results[questionnaire_id]) - if avg is not None: - avg_value_sum += avg - count_avg += 1 - if average_approval_ratio is not None: - avg_approval_sum += average_approval_ratio - count_approval += 1 - - avg_value = avg_value_sum / count_avg if count_avg else None - avg_approval = avg_approval_sum / count_approval if count_approval else None - return avg_value, avg_approval + + average_grade, average_approval = cls._get_average_grade_and_approval( + question, results_by_questionnaire_id[questionnaire_id] + ) + + average_grade_averager.record_value(average_grade) + + if question.is_yes_no_question: + average_approval_averager.record_value(typing.cast("float", average_approval)) + + if question.is_yes_no_question: + return average_grade_averager.current_average(), average_approval_averager.current_average() + + return average_grade_averager.current_average(), None def write_questionnaire( self, questionnaire: Questionnaire, - evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + evaluations_with_results: list[tuple[Evaluation, dict[int, list[QuestionResult]]]], contributor: UserProfile | None, - all_evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + all_evaluations_with_results: list[tuple[Evaluation, dict[int, list[QuestionResult]]]], ) -> None: if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR: self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold") else: self.write_cell(questionnaire.public_name, "bold") - self.write_cell("", "border_left_right") - - # first cell of row is printed above self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results)) for question in self.filter_text_and_heading_questions(questionnaire.questions.all()): self.write_cell(question.text, "italic" if question.is_heading_question else "default") - average_grade, approval_ratio = self._get_average_of_average_grade_and_approval_ratio( + grade_average, approval_average = self._get_average_of_average_grade_and_approval( all_evaluations_with_results, questionnaire.id, question ) - if approval_ratio is not None and average_grade is not None: - self.write_cell(f"{approval_ratio:.0%}", self.grade_to_style(average_grade)) - elif average_grade is not None: - self.write_cell(average_grade, self.grade_to_style(average_grade)) - else: + if math.isnan(grade_average) or math.isnan(approval_average or 0): self.write_cell("", "border_left_right") + elif question.is_yes_no_question: + self.write_cell(f"{approval_average:.0%}", self.grade_to_style(grade_average)) + else: + self.write_cell(grade_average, self.grade_to_style(grade_average)) # evaluations for __, results in evaluations_with_results: @@ -386,18 +394,21 @@ def write_questionnaire( self.write_cell(style="border_left_right") continue - avg, average_approval_ratio = self._get_average_grade_and_approval_ratio( + evaluation_grade_average, evaluation_approval_ratio_average = self._get_average_grade_and_approval( question, results[questionnaire.id] ) - if avg is None: + if math.isnan(evaluation_grade_average): 
self.write_cell(style="border_left_right") continue if question.is_yes_no_question: - self.write_cell(f"{average_approval_ratio:.0%}", self.grade_to_style(avg)) + self.write_cell( + f"{evaluation_approval_ratio_average:.0%}", self.grade_to_style(evaluation_grade_average) + ) else: - self.write_cell(avg, self.grade_to_style(avg)) + self.write_cell(evaluation_grade_average, self.grade_to_style(evaluation_grade_average)) + self.next_row() self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) @@ -415,16 +426,12 @@ def export_impl( # We want to throw early here, since workbook.save() will throw an IndexError otherwise. assert len(selection_list) > 0 - all_evaluations_with_results, _, _ = self.filter_evaluations(evaluation_states=[Evaluation.State.PUBLISHED]) - for sheet_counter, (program_ids, course_type_ids) in enumerate(selection_list, 1): - self.cur_sheet = self.workbook.add_sheet("Sheet " + str(sheet_counter)) - self.cur_row = 0 - self.cur_col = 0 + self.next_sheet("Sheet " + str(sheet_counter)) evaluation_states = [Evaluation.State.PUBLISHED] if include_unpublished: - evaluation_states.extend([Evaluation.State.EVALUATED, Evaluation.State.REVIEWED]) + evaluation_states += [Evaluation.State.EVALUATED, Evaluation.State.REVIEWED] evaluations_with_results, used_questionnaires, course_results_exist = self.filter_evaluations( semesters, @@ -435,6 +442,15 @@ def export_impl( include_not_enough_voters, ) + all_evaluations_with_results, __, ___ = self.filter_evaluations( + semesters=None, + evaluation_states=[Evaluation.State.PUBLISHED], + program_ids=program_ids, + course_type_ids=course_type_ids, + contributor=None, + include_not_enough_voters=False, + ) + self.write_headings_and_evaluation_info( evaluations_with_results, semesters, contributor, program_ids, course_type_ids, verbose_heading ) @@ -447,7 +463,6 @@ def export_impl( self.write_overall_results(evaluations_with_results, course_results_exist) -# See method definition. ResultsExporter.init_grade_styles() diff --git a/evap/results/templates/distribution_widget.html b/evap/results/templates/distribution_widget.html index 5241e0b032..0b7b9a6df2 100644 --- a/evap/results/templates/distribution_widget.html +++ b/evap/results/templates/distribution_widget.html @@ -6,7 +6,7 @@ data-bs-toggle="tooltip" data-bs-placement="left" data-fallback-placement='["left", "bottom"]' title="{% include 'result_widget_tooltip.html' with question_result=question_result %}" > - {% include 'distribution_with_grade.html' with question_result=question_result distribution=question_result.counts|normalized_distribution average=question_result.average %} + {% include 'distribution_with_grade.html' with question_result=question_result distribution=question_result.counts|normalized_distribution average=question_result.grade_average %}
 {{ question_result.count_sum }}
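The averaging logic in this refactor is easiest to see in isolation. The following is a minimal standalone sketch, assuming the Averager class stays module-level in evap/results/exporters.py exactly as in the hunk above; it shows the two behaviours the exporter relies on: NaN inputs are skipped rather than poisoning the result, the weight argument combines per-question averages proportionally to their count_sum, and an Averager that never recorded a value reports NaN, which write_questionnaire renders as an empty cell.

import math

from evap.results.exporters import Averager

averager = Averager()
averager.record_value(1.5, weight=2)        # a question average backed by 2 answers
averager.record_value(5 / 3, weight=3)      # the same question backed by 3 answers elsewhere
averager.record_value(math.nan, weight=10)  # unanswered results are ignored entirely

# (1.5 * 2 + 5/3 * 3) / (2 + 3) == 8 / 5
assert math.isclose(averager.current_average(), 1.6)

# no recorded values -> NaN, which the exporter maps to an empty cell
assert math.isnan(Averager().current_average())
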
diff --git a/evap/results/tests/test_tools.py b/evap/results/tests/test_tools.py index 17daa3f280..916e1d8c26 100644 --- a/evap/results/tests/test_tools.py +++ b/evap/results/tests/test_tools.py @@ -100,7 +100,7 @@ def test_calculation_unipolar_results(self): question_result = questionnaire_result.question_results[0] self.assertEqual(question_result.count_sum, 150) - self.assertAlmostEqual(question_result.average, float(109) / 30) + self.assertAlmostEqual(question_result.grade_average, float(109) / 30) self.assertEqual(question_result.counts, (5, 15, 40, 60, 30)) def test_calculation_bipolar_results(self): @@ -132,7 +132,7 @@ def test_calculation_bipolar_results(self): question_result = questionnaire_result.question_results[0] self.assertEqual(question_result.count_sum, 105) - self.assertAlmostEqual(question_result.average, 2.58730158) + self.assertAlmostEqual(question_result.grade_average, 2.58730158) self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10)) self.assertEqual(question_result.minus_balance_count, 32.5) distribution = normalized_distribution(question_result.counts) diff --git a/evap/results/tools.py b/evap/results/tools.py index 6fdf8ae064..353e0be7a0 100644 --- a/evap/results/tools.py +++ b/evap/results/tools.py @@ -121,11 +121,15 @@ def approval_count(self) -> int: class AnsweredRatingResult(PublishedRatingResult): @property - def average(self) -> float: + def grade_average(self) -> float: return ( sum(grade * count for count, grade in zip(self.counts, self.choices.grades, strict=True)) / self.count_sum ) + @property + def approval_average(self) -> float: + return self.approval_count / self.count_sum + class TextResult: def __init__( From 6875a02acdcfebe1a52f17b3c966dc75f4eff267 Mon Sep 17 00:00:00 2001 From: Pipifax <37810842+Belissimo-T@users.noreply.github.com> Date: Mon, 27 Apr 2026 22:14:53 +0200 Subject: [PATCH 24/24] fix existing tests --- evap/results/tests/test_exporters.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index 2feae730f0..a89ada46f0 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -613,8 +613,12 @@ def test_total_average(self): questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP) - question_1 = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire_1) - question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2) + assignment_1 = baker.make( + QuestionAssignment, question__type=QuestionType.GRADE, questionnaire=questionnaire_1 + ) + assignment_2 = baker.make( + QuestionAssignment, question__type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2 + ) evaluation_1 = baker.make( Evaluation, @@ -634,12 +638,12 @@ def test_total_average(self): evaluation_1.general_contribution.questionnaires.set([questionnaire_1]) - make_rating_answer_counters(question_1, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) + make_rating_answer_counters(assignment_1, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2]) - make_rating_answer_counters(question_1, evaluation_2.general_contribution, [1, 2, 0, 0, 0]) - make_rating_answer_counters(question_2, evaluation_2.general_contribution) + make_rating_answer_counters(assignment_1, 
evaluation_2.general_contribution, [1, 2, 0, 0, 0]) + make_rating_answer_counters(assignment_2, evaluation_2.general_contribution) cache_results(evaluation_1) cache_results(evaluation_2)
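
The expected spreadsheet value in test_total_average can be checked by hand from the counters used above: evaluation_1 gets one grade-1 and one grade-2 answer, evaluation_2 gets one grade-1 and two grade-2 answers, and the new average column takes the unweighted mean of the two per-evaluation averages (each of which is itself weighted by its answer count). A short sketch of that arithmetic, independent of the test fixtures (variable names are illustrative only):

import math

# counters [1, 1, 0, 0, 0]: one answer of grade 1, one of grade 2
evaluation_1_average = (1 * 1 + 2 * 1) / 2   # 1.5
# counters [1, 2, 0, 0, 0]: one answer of grade 1, two of grade 2
evaluation_2_average = (1 * 1 + 2 * 2) / 3   # 5/3
# the "Average result for this question ..." column averages the per-evaluation averages
expected = (evaluation_1_average + evaluation_2_average) / 2
assert math.isclose(expected, 1.5833333333333335)  # the value asserted via assertAlmostEqual above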