Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
Package: gradeR
Title: Helps Grade Assignment Submissions in common R formats
Version: 2.0.0
Version: 2.0.1
Authors@R: c(
person("Taylor", "Brown", email = "trb5me@virginia.edu", role = c("aut", "cre")),
person("Pete", "Benbow", email = "pebenbow@davidson.edu", role = "ctb",
Expand Down
1 change: 1 addition & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@ export(calcGrades)
export(calcGradesForGradescope)
export(findBadEncodingFiles)
export(findGlobalPaths)
export(getTestScriptReport)
17 changes: 17 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,20 @@
# gradeR 2.0.1

## Enhancements

Added a new function, `getTestScriptReport`, that counts the number of test cases in a given test script and produces a formatted score report. This should allow instructors to audit their test scripts and determine maximum possible point values when configuring their autograder assignments.

# gradeR 2.0.0

## Enhancements

`calcGradesForGradescope` has been substantially rewritten:

- Can now process R Markdown and Quarto files in addition to R scripts.
- Messages for individual test cases can now be customized using the `label` property of any `expect_*` function within the `testthat` package.
- Point values for individual tests can now be customized by adding strings like "[2pts]" to the aforementioned `label` property.
- Code has been refactored into smaller reusable pieces for better testing.

# gradeR 1.0.10

## Bug fixes
Expand Down
184 changes: 184 additions & 0 deletions R/grade.r
Original file line number Diff line number Diff line change
Expand Up @@ -438,6 +438,190 @@ calcGradesForGradescope <- function(submission_file,
}



#' The function for analyzing and summarizing R test scripts.
#'
#' This function scans a given test script and summarizes the number of
#' tests and test criteria in the script, as well as point values. Point
#' values are read from `expect_*` labels containing tags like "[2pts]";
#' unlabeled criteria default to 1 point.
#' @param script_path the name of the .R file containing the tests
#'   (e.g. "hw1_tests.R")
#' @return The value of `getPrettyReport()`; the function is called
#'   primarily for its printed report.
#' @keywords getTestScriptReport getPrettyReport
#' @export
getTestScriptReport <- function(script_path) {
  # Read and parse the R script
  script_content <- readLines(script_path, warn = FALSE)
  parsed <- parse(text = script_content, keep.source = TRUE)

  # Helper: return the unqualified function name at the head of a call,
  # or "" when the head is not a simple name. This handles namespaced
  # calls such as `testthat::expect_equal(...)`: as.character() on a
  # `pkg::fun` head yields a length-3 vector, which would error inside
  # if()/grepl() conditions on R >= 4.2.
  call_name <- function(expr) {
    head <- expr[[1]]
    if (is.name(head)) {
      return(as.character(head))
    }
    if (is.call(head) && length(head) == 3 &&
        as.character(head[[1]])[1] %in% c("::", ":::")) {
      return(as.character(head[[3]]))
    }
    ""
  }

  # Helper: extract a point value from a label expression; defaults to 1.
  # Recognizes patterns like "[2pts]" or "[3pt]" (case-insensitive).
  extract_points <- function(label_expr) {
    if (is.null(label_expr)) return(1)

    # Collapse to a single string so regexpr() receives a scalar even
    # when the label is an unevaluated call (e.g. paste0(...)), whose
    # as.character()/deparse() can span multiple elements.
    label_text <- tryCatch({
      if (is.character(label_expr)) {
        paste(label_expr, collapse = " ")
      } else {
        paste(deparse(label_expr), collapse = " ")
      }
    }, error = function(e) "")

    match <- regexpr("\\[(\\d+)pts?\\]", label_text, ignore.case = TRUE)
    if (match > 0) {
      points_str <- regmatches(label_text, match)
      return(as.numeric(gsub("\\[|pts?\\]", "", points_str,
                             ignore.case = TRUE)))
    }
    1  # default value when no point tag is present
  }

  # Helper: recursively collect every expect_* call within an expression.
  find_expect_calls <- function(expr) {
    expect_calls <- list()

    if (is.call(expr)) {
      func_name <- call_name(expr)

      # Check if this is an expect_ function
      if (nzchar(func_name) && grepl("^expect_", func_name)) {
        args <- as.list(expr)
        label_arg <- if ("label" %in% names(args)) args$label else NULL
        expect_calls[[1]] <- list(
          function_name = func_name,
          points = extract_points(label_arg),
          label = label_arg
        )
      }

      # Recursively search in all arguments (an empty tail skips the loop)
      for (arg in as.list(expr)[-1]) {
        expect_calls <- c(expect_calls, find_expect_calls(arg))
      }
    }

    expect_calls
  }

  # Initialize counters
  test_count <- 0
  criteria_per_test <- list()
  expect_function_counts <- list()
  total_points <- 0
  test_details <- list()

  # Walk the top-level expressions looking for test_that() calls
  for (i in seq_along(parsed)) {
    expr <- parsed[[i]]

    if (is.call(expr) && call_name(expr) == "test_that") {
      test_count <- test_count + 1

      # Test description: second argument, when it is a plain string
      test_desc <- if (length(expr) >= 2 && is.character(expr[[2]])) {
        expr[[2]]
      } else {
        "Unnamed test"
      }

      # Find all expect_ calls within this test_that's body (third arg)
      expect_calls <- if (length(expr) >= 3) {
        find_expect_calls(expr[[3]])
      } else {
        list()
      }

      # Count criteria for this test
      criteria_count <- length(expect_calls)
      criteria_per_test[[test_count]] <- criteria_count

      # Sum points for this test; sum(numeric(0)) is 0, so no
      # empty-list special case is needed.
      test_points <- sum(vapply(expect_calls,
                                function(x) x$points, numeric(1)))

      # Count each expect_ function type
      for (call_info in expect_calls) {
        fn <- call_info$function_name
        prev <- expect_function_counts[[fn]]
        expect_function_counts[[fn]] <- if (is.null(prev)) 1 else prev + 1
      }

      # Add to total points
      total_points <- total_points + test_points

      # Store test details
      test_details[[test_count]] <- list(
        description = test_desc,
        criteria_count = criteria_count,
        points = test_points,
        expect_calls = expect_calls
      )
    }
  }

  # Generate summary report
  report <- list(
    summary = list(
      total_tests = test_count,
      total_criteria = sum(unlist(criteria_per_test)),
      total_points = total_points
    ),
    criteria_per_test = criteria_per_test,
    expect_function_counts = expect_function_counts,
    test_details = test_details
  )

  getPrettyReport(report, script_path)
}

#' The function prints a formatted test script analysis.
#'
#' This function is used inside the getTestScriptReport function to
#' produce a formatted report as output.
#' @param report a list object produced by getTestScriptReport, with
#'   elements `summary`, `expect_function_counts`, and `test_details`
#' @param script_path a string representing the relative path to the
#'   test script
#' @return Invisibly returns NULL; called for its printed output.
#' @keywords internal
getPrettyReport <- function(report, script_path) {
  # A single 62-character rule, built once instead of via cat(rep(...)).
  rule <- strrep("=", 62)

  cat(rule, "\n", sep = "")
  cat("TEST SCRIPT ANALYSIS REPORT\n")
  cat(rule, "\n\n", sep = "")

  cat(sprintf("Script: %s\n\n", script_path))

  cat("SUMMARY:\n")
  cat(sprintf("  Total test_that calls: %d\n", report$summary$total_tests))
  cat(sprintf("  Total criteria: %d\n", report$summary$total_criteria))
  # Point totals are doubles (from as.numeric) and sprintf("%d", ...)
  # errors on non-integral doubles, so format() them as strings instead.
  cat(sprintf("  Total points: %s\n\n",
              format(report$summary$total_points)))

  cat("EXPECT_ FUNCTION USAGE:\n")
  for (func in names(report$expect_function_counts)) {
    cat(sprintf("  %s: %d\n", func, report$expect_function_counts[[func]]))
  }
  cat("\n")

  cat("TEST DETAILS:\n")
  for (i in seq_along(report$test_details)) {
    test <- report$test_details[[i]]
    cat(sprintf("  Test %d: %s\n", i, test$description))
    cat(sprintf("    Criteria: %d\n", test$criteria_count))
    cat(sprintf("    Points: %s\n", format(test$points)))
  }

  cat("\n", rule, "\n", sep = "")
  invisible(NULL)
}


# #' The averaging function.
# #'
# #' This function calculates each student's course average based on all of their assignments.
Expand Down
17 changes: 17 additions & 0 deletions man/getPrettyReport.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

16 changes: 16 additions & 0 deletions man/getTestScriptReport.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading