diff --git a/evap/evaluation/tools.py b/evap/evaluation/tools.py index 8ff1cb777b..e3f0c8fe7e 100644 --- a/evap/evaluation/tools.py +++ b/evap/evaluation/tools.py @@ -295,6 +295,7 @@ def content(self, value): class ExcelExporter(ABC): styles = { "default": xlwt.Style.default_style, + "missing_average": xlwt.Style.default_style, "headline": xlwt.easyxf( "font: bold on, height 400; alignment: horiz centre, vert centre, wrap on; borders: bottom medium", num_format_str="0.0", diff --git a/evap/results/exporters.py b/evap/results/exporters.py index bc48e4d746..53da65d366 100644 --- a/evap/results/exporters.py +++ b/evap/results/exporters.py @@ -41,6 +41,9 @@ class ResultsExporter(ExcelExporter): "evaluation": xlwt.easyxf( "alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium" ), + "average": xlwt.easyxf( + "alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium, right medium, bottom medium; font: italic on" + ), "total_voters": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), "evaluation_rate": xlwt.easyxf("alignment: horiz centre; borders: left medium, bottom medium, right medium"), "evaluation_weight": xlwt.easyxf("alignment: horiz centre; borders: left medium, right medium"), @@ -114,24 +117,29 @@ def filter_text_and_heading_questions(questions: Iterable[Question]) -> list[Que return filtered_questions @staticmethod - def filter_evaluations( - semesters: Iterable[Semester], - evaluation_states: Iterable[Evaluation.State], - program_ids: Iterable[int], - course_type_ids: Iterable[int], - contributor: UserProfile | None, - include_not_enough_voters: bool, + def filter_evaluations( # noqa: PLR0912 + semesters: Iterable[Semester] | None = None, + evaluation_states: Iterable[Evaluation.State] | None = None, + program_ids: Iterable[int] | None = None, + course_type_ids: Iterable[int] | None = None, + contributor: UserProfile | None = None, + include_not_enough_voters: bool = 
False, ) -> tuple[list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], list[Questionnaire], bool]: # pylint: disable=too-many-locals course_results_exist = False evaluations_with_results = [] used_questionnaires: set[Questionnaire] = set() - evaluations_filter = Q( - course__semester__in=semesters, - state__in=evaluation_states, - course__programs__in=program_ids, - course__type__in=course_type_ids, - ) + + evaluations_filter = Q() + if semesters is not None: + evaluations_filter &= Q(course__semester__in=semesters) + if evaluation_states is not None: + evaluations_filter &= Q(state__in=evaluation_states) + if program_ids is not None: + evaluations_filter &= Q(course__programs__in=program_ids) + if course_type_ids is not None: + evaluations_filter &= Q(course__type__in=course_type_ids) + if contributor: evaluations_filter = evaluations_filter & ( Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor]) @@ -198,6 +206,10 @@ def write_headings_and_evaluation_info( else: self.write_cell(export_name, "headline") + self.write_cell( + _("Average result for this question over all published evaluations in all semesters"), "average" + ) + for evaluation, __ in evaluations_with_results: title = evaluation.full_name if len(semesters) > 1: @@ -208,17 +220,19 @@ def write_headings_and_evaluation_info( self.next_row() self.write_cell(_("Programs"), "bold") + self.write_cell("", "program") # empty cell in grade-average column for evaluation, __ in evaluations_with_results: self.write_cell("\n".join([d.name for d in evaluation.course.programs.all()]), "program") self.next_row() self.write_cell(_("Course Type"), "bold") + self.write_cell("", "border_left_right") # empty cell in grade-average column for evaluation, __ in evaluations_with_results: self.write_cell(evaluation.course.type.name, "border_left_right") self.next_row() - # One more cell is needed for the question column - self.write_empty_row_with_styles(["default"] + 
["border_left_right"] * len(evaluations_with_results)) + # One column for the question, one column for the average, n columns for the evaluations + self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) def write_overall_results( self, @@ -228,14 +242,17 @@ def write_overall_results( annotated_evaluations = [e for e, __ in evaluations_with_results] self.write_cell(_("Overall Average Grade"), "bold") + self.write_cell("", "border_left_right") averages = (distribution_to_grade(calculate_average_distribution(e)) for e in annotated_evaluations) self.write_row(averages, lambda avg: self.grade_to_style(avg) if avg else "border_left_right") self.write_cell(_("Total voters/Total participants"), "bold") + self.write_cell("", "total_voters") voter_ratios = (f"{e.num_voters}/{e.num_participants}" for e in annotated_evaluations) self.write_row(voter_ratios, style="total_voters") self.write_cell(_("Evaluation rate"), "bold") + self.write_cell("", "evaluation_rate") # round down like in progress bar participant_percentages = ( f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%" @@ -247,12 +264,13 @@ def write_overall_results( # Only query the number of evaluations once and keep track of it here. count_gt_1: list[bool] = [e.course_evaluations_count > 1 for e in annotated_evaluations] - # Borders only if there is a course grade below. Offset by one column + # Borders only if there is a course grade below. 
Offset by one column for column title and one for average self.write_empty_row_with_styles( - ["default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1] + ["default", "default"] + ["border_left_right" if gt1 else "default" for gt1 in count_gt_1] ) self.write_cell(_("Evaluation weight"), "bold") + self.write_cell("", "missing_average") weight_percentages = ( f"{e.weight_percentage}%" if gt1 else None for e, gt1 in zip(annotated_evaluations, count_gt_1, strict=True) @@ -260,6 +278,7 @@ def write_overall_results( self.write_row(weight_percentages, lambda s: "evaluation_weight" if s is not None else "default") self.write_cell(_("Course Grade"), "bold") + self.write_cell("", "missing_average") for evaluation, gt1 in zip(annotated_evaluations, count_gt_1, strict=True): if not gt1: self.write_cell() @@ -271,58 +290,118 @@ def write_overall_results( self.next_row() # Same reasoning as above. - self.write_empty_row_with_styles(["default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1]) + self.write_empty_row_with_styles( + ["default", "default"] + ["border_top" if gt1 else "default" for gt1 in count_gt_1] + ) + + @classmethod + def _get_average_grade_and_approval_ratio( + cls, questionnaire_id: int, question: Question, results: OrderedDict[int, list[QuestionResult]] + ) -> tuple[float | None, float | None]: + value_sum = 0.0 + count_sum = 0 + approval_count = 0 + + for grade_result in results[questionnaire_id]: + if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): + continue + + value_sum += grade_result.average * grade_result.count_sum + count_sum += grade_result.count_sum + if question.is_yes_no_question: + approval_count += grade_result.approval_count + + if not count_sum: + return None, None + + avg = value_sum / count_sum + if question.is_yes_no_question: + average_approval_ratio = approval_count / count_sum if count_sum > 0 else 0 + return avg, average_approval_ratio + return avg, None + + 
@classmethod + def _get_average_of_average_grade_and_approval_ratio( + cls, + evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], + questionnaire_id: int, + question: Question, + ) -> tuple[float | None, float | None]: + avg_value_sum = 0.0 + count_avg = 0 + avg_approval_sum = 0.0 + count_approval = 0 + + for __, results in evaluations_with_results: + if ( + results.get(questionnaire_id) is None + ): # we iterate over all distinct questionnaires from all evaluations but some evaluations do not include a specific questionnaire + continue + avg, average_approval_ratio = cls._get_average_grade_and_approval_ratio(questionnaire_id, question, results) + if avg is not None: + avg_value_sum += avg + count_avg += 1 + if average_approval_ratio is not None: + avg_approval_sum += average_approval_ratio + count_approval += 1 + + avg_value = avg_value_sum / count_avg if count_avg else None + avg_approval = avg_approval_sum / count_approval if count_approval else None + return avg_value, avg_approval def write_questionnaire( self, questionnaire: Questionnaire, evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], contributor: UserProfile | None, + all_evaluations_with_results: list[tuple[Evaluation, OrderedDict[int, list[QuestionResult]]]], ) -> None: if contributor and questionnaire.type == Questionnaire.Type.CONTRIBUTOR: self.write_cell(f"{questionnaire.public_name} ({contributor.full_name})", "bold") else: self.write_cell(questionnaire.public_name, "bold") + self.write_cell("", "border_left_right") + # first cell of row is printed above self.write_empty_row_with_styles(["border_left_right"] * len(evaluations_with_results)) for question in self.filter_text_and_heading_questions(questionnaire.questions.all()): self.write_cell(question.text, "italic" if question.is_heading_question else "default") + average_grade, approval_ratio = self._get_average_of_average_grade_and_approval_ratio( + 
all_evaluations_with_results, questionnaire.id, question + ) + if approval_ratio is not None and average_grade is not None: + self.write_cell(f"{approval_ratio:.0%}", self.grade_to_style(average_grade)) + elif average_grade is not None: + self.write_cell(average_grade, self.grade_to_style(average_grade)) + else: + self.write_cell("", "border_left_right") + + # evaluations for __, results in evaluations_with_results: if questionnaire.id not in results or question.is_heading_question: self.write_cell(style="border_left_right") continue - values = [] - count_sum = 0 - approval_count = 0 - - for grade_result in results[questionnaire.id]: - if grade_result.question.id != question.id or not RatingResult.has_answers(grade_result): - continue - - values.append(grade_result.average * grade_result.count_sum) - count_sum += grade_result.count_sum - if grade_result.question.is_yes_no_question: - approval_count += grade_result.approval_count + avg, average_approval_ratio = self._get_average_grade_and_approval_ratio( + questionnaire.id, question, results + ) - if not values: + if avg is None: self.write_cell(style="border_left_right") continue - avg = sum(values) / count_sum if question.is_yes_no_question: - percent_approval = approval_count / count_sum if count_sum > 0 else 0 - self.write_cell(f"{percent_approval:.0%}", self.grade_to_style(avg)) + self.write_cell(f"{average_approval_ratio:.0%}", self.grade_to_style(avg)) else: self.write_cell(avg, self.grade_to_style(avg)) self.next_row() - self.write_empty_row_with_styles(["default"] + ["border_left_right"] * len(evaluations_with_results)) + self.write_empty_row_with_styles(["default"] + ["border_left_right"] * (len(evaluations_with_results) + 1)) - # pylint: disable=arguments-differ + # pylint: disable=arguments-differ,too-many-locals def export_impl( self, semesters: QuerySetOrSequence[Semester], @@ -335,6 +414,8 @@ def export_impl( # We want to throw early here, since workbook.save() will throw an IndexError otherwise. 
assert len(selection_list) > 0 + all_evaluations_with_results, __, __ = self.filter_evaluations(evaluation_states=[Evaluation.State.PUBLISHED]) + for sheet_counter, (program_ids, course_type_ids) in enumerate(selection_list, 1): self.cur_sheet = self.workbook.add_sheet("Sheet " + str(sheet_counter)) self.cur_row = 0 @@ -358,7 +439,9 @@ def export_impl( ) for questionnaire in used_questionnaires: - self.write_questionnaire(questionnaire, evaluations_with_results, contributor) + self.write_questionnaire( + questionnaire, evaluations_with_results, contributor, all_evaluations_with_results + ) self.write_overall_results(evaluations_with_results, course_results_exist) diff --git a/evap/results/tests/test_exporters.py b/evap/results/tests/test_exporters.py index a6daafcd25..c1393e706c 100644 --- a/evap/results/tests/test_exporters.py +++ b/evap/results/tests/test_exporters.py @@ -188,12 +188,12 @@ def test_view_excel_file_sorted(self): # Load responses as Excel files and check for correct sorting workbook = xlrd.open_workbook(file_contents=content_de.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation1\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation2\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation1\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation2\n") workbook = xlrd.open_workbook(file_contents=content_en.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], "A – Evaluation2\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], "B – Evaluation1\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], "A – Evaluation2\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], "B – Evaluation1\n") def test_course_type_ordering(self): program = baker.make(Program) @@ -236,8 +236,8 @@ def test_course_type_ordering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - 
self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name + "\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_2.full_name + "\n") course_type_2.order = 0 course_type_2.save() @@ -249,8 +249,8 @@ def test_course_type_ordering(self): binary_content.seek(0) workbook = xlrd.open_workbook(file_contents=binary_content.read()) - self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n") - self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n") + self.assertEqual(workbook.sheets()[0].row_values(0)[3], evaluation_1.full_name + "\n") def test_multiple_sheets(self): binary_content = BytesIO() @@ -302,17 +302,17 @@ def test_include_unpublished(self): sheet = self.get_export_sheet( include_unpublished=False, semester=semester, program=program, course_types=course_types ) - self.assertEqual(len(sheet.row_values(0)), 2) - self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name) + self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name) # Now, make sure that it appears when wanted sheet = self.get_export_sheet( include_unpublished=True, semester=semester, program=program, course_types=course_types ) - self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(len(sheet.row_values(0)), 4) # These two should be ordered according to evaluation.course.type.order - self.assertEqual(sheet.row_values(0)[1][:-1], published_evaluation.full_name) - self.assertEqual(sheet.row_values(0)[2][:-1], unpublished_evaluation.full_name) + self.assertEqual(sheet.row_values(0)[2][:-1], published_evaluation.full_name) + 
self.assertEqual(sheet.row_values(0)[3][:-1], unpublished_evaluation.full_name) def test_include_not_enough_voters(self): semester = baker.make(Semester) @@ -341,15 +341,15 @@ def test_include_not_enough_voters(self): # First, make sure that the one with only a single voter does not appear sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=False) - self.assertEqual(len(sheet.row_values(0)), 2) - self.assertEqual(sheet.row_values(0)[1][:-1], enough_voters_evaluation.full_name) + self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(sheet.row_values(0)[2][:-1], enough_voters_evaluation.full_name) # Now, check with the option enabled sheet = self.get_export_sheet(semester, program, course_types, include_not_enough_voters=True) - self.assertEqual(len(sheet.row_values(0)), 3) + self.assertEqual(len(sheet.row_values(0)), 4) self.assertEqual( {enough_voters_evaluation.full_name, not_enough_voters_evaluation.full_name}, - {sheet.row_values(0)[1][:-1], sheet.row_values(0)[2][:-1]}, + {sheet.row_values(0)[2][:-1], sheet.row_values(0)[3][:-1]}, ) def test_no_program_or_course_type(self): @@ -392,7 +392,7 @@ def test_program_course_type_name(self): cache_results(evaluation) sheet = self.get_export_sheet(evaluation.course.semester, program, [course_type.id]) - self.assertEqual(sheet.col_values(1)[1:3], [program.name, course_type.name]) + self.assertEqual(sheet.col_values(2)[1:3], [program.name, course_type.name]) def test_multiple_evaluations(self): semester = baker.make(Semester) @@ -408,7 +408,7 @@ def test_multiple_evaluations(self): sheet = self.get_export_sheet(semester, program, [evaluation1.course.type.id, evaluation2.course.type.id]) - self.assertEqual(set(sheet.row_values(0)[1:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"}) + self.assertEqual(set(sheet.row_values(0)[2:]), {evaluation1.full_name + "\n", evaluation2.full_name + "\n"}) def test_correct_grades_and_bottom_numbers(self): program = 
baker.make(Program) @@ -436,11 +436,11 @@ def test_correct_grades_and_bottom_numbers(self): sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) - self.assertEqual(sheet.row_values(5)[1], 2.0) # question 1 average - self.assertEqual(sheet.row_values(8)[1], 3.0) # question 2 average - self.assertEqual(sheet.row_values(10)[1], 2.5) # Average grade - self.assertEqual(sheet.row_values(11)[1], "5/10") # Voters / Participants - self.assertEqual(sheet.row_values(12)[1], "50%") # Voter percentage + self.assertEqual(sheet.row_values(5)[2], 2.0) # question 1 average + self.assertEqual(sheet.row_values(8)[2], 3.0) # question 2 average + self.assertEqual(sheet.row_values(10)[2], 2.5) # Average grade + self.assertEqual(sheet.row_values(11)[2], "5/10") # Voters / Participants + self.assertEqual(sheet.row_values(12)[2], "50%") # Voter percentage def test_course_grade(self): program = baker.make(Program) @@ -470,9 +470,9 @@ def test_course_grade(self): cache_results(evaluation) sheet = self.get_export_sheet(course.semester, program, [course.type.id]) - self.assertEqual(sheet.row_values(12)[1], expected_average) self.assertEqual(sheet.row_values(12)[2], expected_average) self.assertEqual(sheet.row_values(12)[3], expected_average) + self.assertEqual(sheet.row_values(12)[4], expected_average) def test_yes_no_question_result(self): program = baker.make(Program) @@ -494,8 +494,8 @@ def test_yes_no_question_result(self): cache_results(evaluation) sheet = self.get_export_sheet(evaluation.course.semester, program, [evaluation.course.type.id]) - self.assertEqual(sheet.row_values(5)[0], assignment.question.text) - self.assertEqual(sheet.row_values(5)[1], "67%") + self.assertEqual(sheet.row_values(5)[0], question.text) + self.assertEqual(sheet.row_values(5)[2], "67%") def test_contributor_result_export(self): program = baker.make(Program) @@ -544,24 +544,24 @@ def test_contributor_result_export(self): workbook = 
xlrd.open_workbook(file_contents=binary_content) self.assertEqual( - workbook.sheets()[0].row_values(0)[1], + workbook.sheets()[0].row_values(0)[2], f"{evaluation_1.full_name}\n{evaluation_1.course.semester.name}\n{contributor.full_name}", ) self.assertEqual( - workbook.sheets()[0].row_values(0)[2], + workbook.sheets()[0].row_values(0)[3], f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}", ) self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.public_name) - self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_assignment.question.text) - self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0) + self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text) + self.assertEqual(workbook.sheets()[0].row_values(5)[3], 4.0) self.assertEqual( workbook.sheets()[0].row_values(7)[0], f"{contributor_questionnaire.public_name} ({contributor.full_name})", ) - self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_assignment.question.text) - self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0) + self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text) + self.assertEqual(workbook.sheets()[0].row_values(8)[3], 3.0) self.assertEqual(workbook.sheets()[0].row_values(10)[0], "Overall Average Grade") - self.assertEqual(workbook.sheets()[0].row_values(10)[2], 3.25) + self.assertEqual(workbook.sheets()[0].row_values(10)[3], 3.25) def test_text_answer_export(self): evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, can_publish_text_results=True) @@ -603,6 +603,72 @@ def test_text_answer_export(self): self.assertEqual(sheet.row_values(2)[0], evaluation.course.responsibles_names) # Questions are ordered by questionnaire type, answers keep their order respectively - self.assertEqual(sheet.row_values(3)[0], assignments[0].question.text) - self.assertEqual(sheet.row_values(5)[0], assignments[1].question.text) - 
self.assertEqual(sheet.row_values(6)[0], assignments[2].question.text) + self.assertEqual(sheet.row_values(3)[0], questions[0].text) + self.assertEqual(sheet.row_values(5)[0], questions[1].text) + self.assertEqual(sheet.row_values(6)[0], questions[2].text) + + def test_total_average(self): + program = baker.make(Program) + + questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP) + questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP) + + question_1 = baker.make(Question, type=QuestionType.GRADE, questionnaire=questionnaire_1) + question_2 = baker.make(Question, type=QuestionType.POSITIVE_LIKERT, questionnaire=questionnaire_2) + + evaluation_1 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=2, + _voter_count=2, + ) + + evaluation_2 = baker.make( + Evaluation, + course__programs=[program], + state=Evaluation.State.PUBLISHED, + _participant_count=3, + _voter_count=3, + ) + + evaluation_1.general_contribution.questionnaires.set([questionnaire_1]) + + make_rating_answer_counters(question_1, evaluation_1.general_contribution, [1, 1, 0, 0, 0]) + + evaluation_2.general_contribution.questionnaires.set([questionnaire_1, questionnaire_2]) + + make_rating_answer_counters(question_1, evaluation_2.general_contribution, [1, 2, 0, 0, 0]) + make_rating_answer_counters(question_2, evaluation_2.general_contribution) + + cache_results(evaluation_1) + cache_results(evaluation_2) + + binary_content = BytesIO() + ResultsExporter().export( + binary_content, + [evaluation_1.course.semester, evaluation_2.course.semester], + [ + ( + [course_program.id for course_program in evaluation_1.course.programs.all()] + + [course_program.id for course_program in evaluation_2.course.programs.all()], + [evaluation_1.course.type.id, evaluation_2.course.type.id], + ) + ], + True, + True, + ) + binary_content.seek(0) + workbook = xlrd.open_workbook(file_contents=binary_content.read()) + 
+ self.assertAlmostEqual(workbook.sheets()[0].row_values(5)[1], 1.5833333333333335) + + self.assertEqual( + workbook.sheets()[0].row_values(0)[1], + "Average result for this question over all published evaluations in all semesters", + ) + + # average for second questionnaire must be the value from evaluation2 (since evaluation1 doesn't have the questionnaire) + self.assertEqual(float(workbook.sheets()[0].row_values(8)[1]), float(workbook.sheets()[0].row_values(8)[3])) + # evaluation1's cell in this question row must be empty, since it does not include the second questionnaire + self.assertEqual("", workbook.sheets()[0].row_values(8)[2])