Changes from all commits
57 commits
75b282a
fix allows additional textanswers saving bug
jooooosef May 12, 2025
c077bf6
added counts for grade on staff side
jooooosef May 12, 2025
178b77a
counts for grade on student side
jooooosef May 26, 2025
68d227c
implement questions not being counted for grade calculation
jooooosef May 26, 2025
2a830cb
add db contraint for counts_for_grade
jooooosef Jun 2, 2025
15887a1
add tests to improve coverage
jooooosef Jun 16, 2025
a78c9b9
minor review changes
jooooosef Jun 16, 2025
374c48b
more minor review changes
jooooosef Jul 14, 2025
5ffe1e9
fix migration naming
jooooosef Jul 14, 2025
45e373b
coding style
jooooosef Jul 18, 2025
395fae1
fix bug
jooooosef Jul 21, 2025
265c4b4
fix migations naming and update test_data
jooooosef Jul 21, 2025
4322823
extend test_data.json with more non counting questions
jooooosef Aug 4, 2025
6d3b054
fixup! add tests to improve coverage
jooooosef Aug 4, 2025
5449094
fixup! fix bug
jooooosef Aug 4, 2025
1f9f10a
remove unnessary defaults
jooooosef Aug 4, 2025
64cca89
fix migration naming
jooooosef Aug 11, 2025
fc8b6c3
fix test_data
jooooosef Aug 11, 2025
95e172d
fixup! fix migration naming
jooooosef Aug 11, 2025
b1137b9
implement dropped course not counting for grade
jooooosef Aug 11, 2025
3972951
fix migration naming
jooooosef Aug 11, 2025
6fa978f
fix saving with extra row
jooooosef Aug 18, 2025
be83197
fixup! implement dropped course not counting for grade
jooooosef Aug 18, 2025
5d8925b
fixup! fixup! implement dropped course not counting for grade
jooooosef Aug 18, 2025
32b69d0
fixup! fixup! implement dropped course not counting for grade
jooooosef Aug 18, 2025
887dbaf
add tests for dropout questionnaires
jooooosef Aug 18, 2025
dd39abb
make migration and tests better
jooooosef Oct 20, 2025
ba8ee0f
fix display bug
jooooosef Oct 20, 2025
d2e4284
fix migration naming
jooooosef Oct 20, 2025
141fb5c
format
jooooosef Oct 20, 2025
9f2e32b
fix display error again
jooooosef Oct 20, 2025
5bec147
move questionnaire and question type select change handlng to seperat…
jooooosef Oct 21, 2025
919ad19
use helper functions
jooooosef Oct 21, 2025
252af0c
improve code quality
jooooosef Oct 21, 2025
5d50f74
introduce helper functions to disable/enable and check/uncheck checkb…
jooooosef Oct 21, 2025
b77fa33
refactor QuestionForm initialization and move checkbox disabling to f…
jooooosef Oct 27, 2025
0bd6eb4
clean up code
jooooosef Nov 10, 2025
8b9e9c7
clean up staff-questionnaire form ts
jooooosef Nov 10, 2025
0d68697
fix migration to use exact same contraint logic
jooooosef Nov 10, 2025
babf328
deduplicate code and make more readable
jooooosef Nov 24, 2025
a9f2402
refactor event listener to use delegation on table
jooooosef Nov 24, 2025
75ae977
add live tests for staff questionnaire edit form
jooooosef Nov 24, 2025
2734028
remove old non working tests
jooooosef Nov 24, 2025
72183ca
add regression test for #2539
jooooosef Nov 24, 2025
00045f7
format
jooooosef Nov 24, 2025
4a0b8b3
fix migration naming
jooooosef Nov 24, 2025
d55e3a2
add tests for handling dropout questionnaires in distribution calcula…
jooooosef Nov 24, 2025
a64383d
improve code quality and readability
jooooosef Dec 15, 2025
6b5b471
fix migration naming
jooooosef Jan 26, 2026
5ae44bb
add helper function for question checkbox logic assertions
jooooosef Feb 2, 2026
7708486
move helper for tomselect to LiveServerTest class
jooooosef Feb 9, 2026
c301bc3
fix some tests so they look better
jooooosef Feb 9, 2026
f6131b4
remove duplicated test
jooooosef Feb 9, 2026
2f88eee
UX improvements and code cleanup
jooooosef Feb 16, 2026
280cd08
fix migration naming
jooooosef Feb 16, 2026
d1aa2b4
fix naming
jooooosef Mar 30, 2026
ebf01a2
disable checkboxes with template and add test
jooooosef Mar 30, 2026
166 changes: 166 additions & 0 deletions evap/development/fixtures/test_data.json

Large diffs are not rendered by default.
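The fixture diff itself is collapsed. Judging from the minimal_test_data_results.json entries further down in this PR, the added entries presumably follow the standard Django dumpdata shape with the new field; a hypothetical example (every concrete value here is a placeholder, not taken from the collapsed diff):

# Hypothetical fixture entry, shown as a Python dict mirroring the JSON serialization
# format used in the fixtures below; pk, order, questionnaire, and texts are placeholders.
fixture_entry = {
    "model": "evaluation.question",
    "pk": 1000,
    "fields": {
        "order": 1,
        "questionnaire": 1,
        "text_de": "Beispielfrage",
        "text_en": "example question",
        "allows_additional_textanswers": True,
        "counts_for_grade": False,  # a non-counting question, as added for test coverage
        "type": 1,
    },
}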

42 changes: 42 additions & 0 deletions evap/evaluation/migrations/0161_question_counts_for_grade.py
@@ -0,0 +1,42 @@
# Generated by Django 5.2 on 2025-05-12 21:30

from django.db import migrations, models
from django.db.models import Q

TEXT = 0
HEADING = 5
DROPOUT_QUESTIONNAIRE = 5


def set_initial_values(apps, _schema_editor):
    Question = apps.get_model("evaluation", "Question")

    Question.objects.filter(Q(type__in=[TEXT, HEADING]) | Q(questionnaire__type=DROPOUT_QUESTIONNAIRE)).update(
        counts_for_grade=False
    )


class Migration(migrations.Migration):
    dependencies = [
        ("evaluation", "0160_evaluation_staff_notes"),
    ]

    operations = [
        migrations.AddField(
            model_name="question",
            name="counts_for_grade",
            field=models.BooleanField(default=True, verbose_name="counts toward the evaluation's grade"),
        ),
        migrations.RunPython(set_initial_values, reverse_code=migrations.RunPython.noop),
        migrations.AddConstraint(
            model_name="question",
            constraint=models.CheckConstraint(
                condition=models.Q(
                    models.Q(("type", TEXT), ("type", HEADING), _connector="OR", _negated=True),
                    ("counts_for_grade", False),
                    _connector="OR",
                ),
                name="check_evaluation_textanswer_or_heading_question_does_not_count_for_grade",
            ),
        ),
    ]
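For readers not fluent in Q-object syntax, the constraint above enforces the same invariant as this plain-Python predicate (a sketch, not project code):

def satisfies_constraint(question_type: int, counts_for_grade: bool) -> bool:
    # Allowed: any non-TEXT/HEADING question, or any question that does not count.
    # Forbidden: a TEXT or HEADING question with counts_for_grade=True.
    return question_type not in (TEXT, HEADING) or counts_for_grade is False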
12 changes: 10 additions & 2 deletions evap/evaluation/models.py
@@ -1268,6 +1268,7 @@ class Question(models.Model):
    text_en = models.CharField(max_length=1024, verbose_name=_("question text (english)"))
    text = translate(en="text_en", de="text_de")
    allows_additional_textanswers = models.BooleanField(default=True, verbose_name=_("allow additional text answers"))
    counts_for_grade = models.BooleanField(default=True, verbose_name=_("counts toward the evaluation's grade"))

    type = models.PositiveSmallIntegerField(choices=QUESTION_TYPES, verbose_name=_("question type"))

@@ -1282,14 +1283,21 @@ class Meta:
                    ~(Q(type=QuestionType.TEXT) | Q(type=QuestionType.HEADING)) | ~Q(allows_additional_textanswers=True)
                ),
                name="check_evaluation_textanswer_or_heading_question_has_no_additional_textanswers",
            ),
            CheckConstraint(
                condition=(~(Q(type=QuestionType.TEXT) | Q(type=QuestionType.HEADING)) | Q(counts_for_grade=False)),
                name="check_evaluation_textanswer_or_heading_question_does_not_count_for_grade",
            ),
        ]

    def save(self, *args, **kwargs):
        if self.type in [QuestionType.TEXT, QuestionType.HEADING]:
            self.allows_additional_textanswers = False
            self.counts_for_grade = False
            if "update_fields" in kwargs:
                kwargs["update_fields"] = {"allows_additional_textanswers", "counts_for_grade"}.union(
                    kwargs["update_fields"]
                )

        super().save(*args, **kwargs)
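A quick sketch of the effect, mirroring the tests below (required fields such as questionnaire and texts omitted for brevity; this is an illustration, not project code):

# Sketch: saving a TEXT question silently forces both flags off, and save() widens
# update_fields so the forced values actually reach the database.
question = Question(type=QuestionType.TEXT, allows_additional_textanswers=True, counts_for_grade=True)
question.save()  # assumes the other required fields are set
assert question.allows_additional_textanswers is False
assert question.counts_for_grade is False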
56 changes: 56 additions & 0 deletions evap/evaluation/tests/test_models.py
@@ -5,6 +5,7 @@
from django.core import mail
from django.core.cache import caches
from django.core.exceptions import ValidationError
from django.db import models
from django.test import override_settings
from django_fsm import TransitionNotAllowed
from model_bakery import baker
@@ -1146,3 +1147,58 @@ class QuestionnaireTests(TestCase):
    def test_locked_contributor_questionnaire(self):
        questionnaire = baker.prepare(Questionnaire, is_locked=True, type=Questionnaire.Type.CONTRIBUTOR)
        self.assertRaises(ValidationError, questionnaire.clean)


class QuestionTests(TestCase):
    def test_save_for_text_and_heading_question_type(self):
        questionnaire = baker.make(Questionnaire)
        # Use prepare() instead of make() to test Question.save() method behavior
        question_text = baker.prepare(
            Question,
            questionnaire=questionnaire,
            type=QuestionType.TEXT,
            allows_additional_textanswers=True,
            counts_for_grade=True,
        )
        question_heading = baker.prepare(
            Question,
            questionnaire=questionnaire,
            type=QuestionType.HEADING,
            allows_additional_textanswers=True,
            counts_for_grade=True,
        )
        question_rating = baker.prepare(
            Question,
            questionnaire=questionnaire,
            type=QuestionType.NEGATIVE_LIKERT,
            allows_additional_textanswers=True,
            counts_for_grade=True,
        )

        question_rating.save()
        question_rating.refresh_from_db()
        self.assertEqual(question_rating.allows_additional_textanswers, True)
        self.assertEqual(question_rating.counts_for_grade, True)

        # Check that the save method resets allows_additional_textanswers and counts_for_grade to False
        question_rating.type = QuestionType.TEXT
        question_rating.save(update_fields=["type"])
        question_rating.refresh_from_db()
        self.assertEqual(question_rating.allows_additional_textanswers, False)
        self.assertEqual(question_rating.counts_for_grade, False)

        with patch.object(models.Model, "save") as mock_save:
            question_text.save(update_fields=["text_de"])
            mock_save.assert_called_once()
            args, kwargs = mock_save.call_args
            self.assertEqual(
                set(kwargs["update_fields"]), {"allows_additional_textanswers", "counts_for_grade", "text_de"}
            )

            mock_save.reset_mock()
            question_heading.save(update_fields=["text_de"])
            mock_save.assert_called_once()
            args, kwargs = mock_save.call_args
            self.assertEqual(
                set(kwargs["update_fields"]), {"allows_additional_textanswers", "counts_for_grade", "text_de"}
            )
3 changes: 3 additions & 0 deletions evap/evaluation/tests/tools.py
@@ -359,6 +359,9 @@ def setUpClass(cls) -> None:
        super().setUpClass()
        cls.selenium.set_window_size(*cls.window_size)

    def set_tomselect_value(self, instance: WebElement, value: str) -> None:
        self.selenium.execute_script(f"arguments[0].tomselect.setValue('{value}');", instance)
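Hypothetical usage inside a live test (the element id and the import are assumptions for illustration, not taken from this diff):

# Hypothetical usage: drive a TomSelect-backed <select> from a live test.
from selenium.webdriver.common.by import By

type_select = self.selenium.find_element(By.ID, "id_type")  # element id assumed
self.set_tomselect_value(type_select, str(QuestionType.HEADING))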


def classes_of_element(element: WebElement) -> list[str]:
    classes = element.get_attribute("class")
5 changes: 5 additions & 0 deletions evap/results/fixtures/minimal_test_data_results.json
@@ -189,6 +189,7 @@
        "text_de": "how?",
        "text_en": "how?",
        "allows_additional_textanswers": false,
        "counts_for_grade": false,
        "type": 0
    }
},
@@ -201,6 +202,7 @@
        "text_de": "how much?",
        "text_en": "how much?",
        "allows_additional_textanswers": true,
        "counts_for_grade": true,
        "type": 1
    }
},
@@ -213,6 +215,7 @@
        "text_de": "how?",
        "text_en": "how?",
        "allows_additional_textanswers": false,
        "counts_for_grade": false,
        "type": 0
    }
},
@@ -225,6 +228,7 @@
        "text_de": "how much?",
        "text_en": "how much?",
        "allows_additional_textanswers": true,
        "counts_for_grade": true,
        "type": 1
    }
},
@@ -237,6 +241,7 @@
        "text_de": "your grade",
        "text_en": "your grade",
        "allows_additional_textanswers": false,
        "counts_for_grade": true,
        "type": 2
    }
},
1 change: 1 addition & 0 deletions evap/results/tests/test_exporters.py
@@ -544,6 +544,7 @@ def test_text_answer_export(self):
            _quantity=len(Questionnaire.Type.values),
            _bulk_create=True,
            allows_additional_textanswers=False,
            counts_for_grade=False,
        )

        baker.make(
93 changes: 82 additions & 11 deletions evap/results/tests/test_tools.py
@@ -20,6 +20,8 @@
from evap.results.tools import (
    ViewContributorResults,
    ViewGeneralResults,
    average_grade_questions_distribution,
    average_non_grade_rating_questions_distribution,
    cache_results,
    calculate_average_course_distribution,
    calculate_average_distribution,
@@ -417,24 +419,93 @@ def test_calculate_average_course_distribution(self):
        self.assertEqual(distribution[3], 0)
        self.assertEqual(distribution[4], 0)

    def test_average_questions_distribution(self):
        grade_question = baker.make(
            Question, questionnaire=self.questionnaire, type=QuestionType.GRADE, counts_for_grade=True
        )
        non_counting_grade_question = baker.make(
            Question, questionnaire=self.questionnaire, type=QuestionType.GRADE, counts_for_grade=False
        )
        likert_question = baker.make(
            Question, questionnaire=self.questionnaire, type=QuestionType.POSITIVE_LIKERT, counts_for_grade=True
        )
        non_counting_likert_question = baker.make(
            Question, questionnaire=self.questionnaire, type=QuestionType.POSITIVE_LIKERT, counts_for_grade=False
        )

        counters = [
            *make_rating_answer_counters(non_counting_grade_question, self.contribution1, [0, 0, 0, 0, 1], False),
            *make_rating_answer_counters(non_counting_likert_question, self.contribution1, [0, 0, 0, 0, 3], False),
        ]
        RatingAnswerCounter.objects.bulk_create(counters)

        cache_results(self.evaluation)
        evaluation_results = get_results(self.evaluation)

        question_results = [
            question_result
            for contribution_result in evaluation_results.contribution_results
            for questionnaire_result in contribution_result.questionnaire_results
            for question_result in questionnaire_result.question_results
        ]

        self.assertIsNone(average_grade_questions_distribution(question_results))
        self.assertIsNone(average_non_grade_rating_questions_distribution(question_results))

        counters = [
            *make_rating_answer_counters(grade_question, self.contribution1, [1, 0, 0, 0, 0], False),
            *make_rating_answer_counters(likert_question, self.contribution1, [0, 0, 3, 0, 0], False),
        ]
        RatingAnswerCounter.objects.bulk_create(counters)

        cache_results(self.evaluation)
        evaluation_results = get_results(self.evaluation)

        question_results = [
            question_result
            for contribution_result in evaluation_results.contribution_results
            for questionnaire_result in contribution_result.questionnaire_results
            for question_result in questionnaire_result.question_results
        ]

        grade_distribution = average_grade_questions_distribution(question_results)
        self.assertEqual(grade_distribution, (1, 0, 0, 0, 0))  # only the counting grade question is included
        self.assertAlmostEqual(distribution_to_grade(grade_distribution), 1.0)

        non_grade_distribution = average_non_grade_rating_questions_distribution(question_results)
        self.assertEqual(
            non_grade_distribution, (0, 0, 1, 0, 0)
        )  # only the counting Likert question is included
        self.assertAlmostEqual(distribution_to_grade(non_grade_distribution), 3.0)

    def test_dropout_questionnaire_excluded_from_distribution(self):
        make_rating_answer_counters(self.question_grade, self.general_contribution, [0, 0, 0, 0, 10])
        cache_results(self.evaluation)

        distribution_without_dropout = calculate_average_distribution(self.evaluation)
        self.assertIsNotNone(distribution_without_dropout)

        dropout_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.DROPOUT)
        dropout_question = baker.make(
            Question,
            questionnaire=dropout_questionnaire,
            type=QuestionType.GRADE,
            counts_for_grade=False,
        )
        self.evaluation.general_contribution.questionnaires.add(dropout_questionnaire)
        make_rating_answer_counters(dropout_question, self.evaluation.general_contribution, [10, 0, 0, 0, 0])
        cache_results(self.evaluation)

        distribution_with_dropout = calculate_average_distribution(self.evaluation)
        self.assertEqual(distribution_without_dropout, distribution_with_dropout)

        dropout_question.counts_for_grade = True
        dropout_question.save()
        cache_results(self.evaluation)

        # Should raise AssertionError because a question in the dropout questionnaire now has counts_for_grade=True
        with self.assertRaises(AssertionError):
            calculate_average_distribution(self.evaluation)


class TestTextAnswerVisibilityInfo(TestCase):
10 changes: 7 additions & 3 deletions evap/results/tools.py
@@ -324,7 +324,7 @@ def average_grade_questions_distribution(results):
        [
            (unipolarized_distribution(result), result.count_sum)
            for result in results
            if result.question.is_grade_question and result.question.counts_for_grade
        ]
    )

@@ -334,7 +334,7 @@ def average_non_grade_rating_questions_distribution(results):
        [
            (unipolarized_distribution(result), result.count_sum)
            for result in results
            if result.question.is_non_grade_rating_question and result.question.counts_for_grade
        ]
    )

@@ -393,7 +393,9 @@ def calculate_average_distribution(evaluation):
    grouped_results = defaultdict(list)
    for contribution_result in get_results(evaluation).contribution_results:
        for questionnaire_result in contribution_result.questionnaire_results:
            if questionnaire_result.questionnaire.is_dropout:  # dropout questionnaires are not counted
                assert not any(result.question.counts_for_grade for result in questionnaire_result.question_results)
            if not questionnaire_result.questionnaire.is_dropout:
                grouped_results[contribution_result.contributor].extend(questionnaire_result.question_results)

    evaluation_results = grouped_results.pop(None, [])

@@ -413,6 +415,8 @@
                ),
            ]
        ),
        # The weight of this contributor's grade is supposed to represent the number of students the
        # contributor interacted with, which we derive from the max answer count, independent of counts_for_grade.
        max(
            (result.count_sum for result in contributor_results if result.question.is_rating_question),
            default=0,
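The distinction in that comment is easy to miss: counts_for_grade filters which distributions enter the average, while the contributor weight still considers every rating question's answer count. A standalone toy illustration of that split (not EvaP code; data and dict keys are made up):

# Standalone sketch: grade averaging filters by counts_for_grade,
# while the contributor weight looks at all rating questions.
results = [
    {"counts_for_grade": True, "count_sum": 5, "avg": 1.0},
    {"counts_for_grade": False, "count_sum": 40, "avg": 5.0},  # excluded from the average below
]
average = sum(r["avg"] * r["count_sum"] for r in results if r["counts_for_grade"]) / sum(
    r["count_sum"] for r in results if r["counts_for_grade"]
)  # -> 1.0
weight = max(r["count_sum"] for r in results)  # -> 40; the non-counting question still sets the weight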
19 changes: 13 additions & 6 deletions evap/staff/forms.py
@@ -907,22 +907,29 @@ def save(self, commit=True):
class QuestionForm(forms.ModelForm):
    class Meta:
        model = Question
        fields = (
            "order",
            "questionnaire",
            "text_de",
            "text_en",
            "type",
            "allows_additional_textanswers",
            "counts_for_grade",
        )
        widgets = {
            "text_de": forms.Textarea(attrs={"rows": 2}),
            "text_en": forms.Textarea(attrs={"rows": 2}),
            "order": forms.HiddenInput(),
        }

    def clean(self):
        super().clean()
        questionnaire = self.cleaned_data.get("questionnaire")
        if questionnaire and questionnaire.is_dropout:
            self.cleaned_data["counts_for_grade"] = False
        if self.cleaned_data.get("type") in [QuestionType.TEXT, QuestionType.HEADING]:
            self.cleaned_data["allows_additional_textanswers"] = False
            self.cleaned_data["counts_for_grade"] = False
        return self.cleaned_data
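A sketch of the resulting behavior (form data and the existence of a dropout questionnaire with pk=1 are assumptions for illustration):

# Sketch: even if the submitted data checks counts_for_grade, clean() forces it
# back to False for questions attached to a dropout questionnaire.
form = QuestionForm(
    data={
        "order": "0",
        "questionnaire": "1",  # pk of a dropout questionnaire (assumed to exist)
        "text_de": "Grund?",
        "text_en": "Reason?",
        "type": str(QuestionType.GRADE),
        "counts_for_grade": "on",
    }
)
assert form.is_valid()
assert form.cleaned_data["counts_for_grade"] is False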

