API Reference

📌 Grader Functions


jupygrader.grade_notebooks()

from jupygrader import grade_notebooks

# Grade a list of notebooks
graded_results = grade_notebooks(['path/to/notebook1.ipynb', 'path/to/notebook2.ipynb'])

# Grade with per-notebook configuration
from jupygrader import grade_notebooks, GradingItem

item1 = {
    "notebook_path": "path/to/notebook1.ipynb",
    "output_path": "path/to/output1",
    "copy_files": ["data1.csv"],
}

item2 = {
    "notebook_path": "path/to/notebook2.ipynb",
    "output_path": None,  # Will default to the notebook's parent directory
    "copy_files": {
        "data/population.csv": "another/path/population.csv",
    },
}

graded_results = grade_notebooks(
    [item1, item2],
    execution_timeout=300  # Set execution timeout to 300 seconds (5 minutes)
)

Grade multiple Jupyter notebooks with test cases.

Processes a list of notebook grading items, executes each notebook in a clean environment, evaluates test cases, and produces graded outputs. Can handle both simple file paths and complex grading configurations.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `grading_items` | `List[Union[FilePath, GradingItem, dict]]` | List of items to grade. Each item can be a string or `Path` pointing to a notebook file, a `GradingItem` object with detailed grading configuration, or a dictionary that can be converted to a `GradingItem`. | *required* |
| `base_files` | `Optional[Union[FilePath, List[FilePath], FileDict]]` | Optional files to include in all grading environments: a single file path (string or `Path`), a list of file paths, or a dictionary mapping source paths to destination paths. | `None` |
| `verbose` | `bool` | Whether to print progress and diagnostic information. | `True` |
| `export_csv` | `bool` | Whether to export results to a CSV file. | `True` |
| `csv_output_path` | `Optional[FilePath]` | Optional path for the CSV export. If `None`, uses the notebook output directories. | `None` |
| `regrade_existing` | `bool` | Whether to regrade notebooks even if results already exist. | `False` |
| `execution_timeout` | `Optional[int]` | Maximum time (in seconds) allowed for notebook execution. Set to `None` to disable the timeout. | `600` |
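
Since `base_files` accepts three shapes, here is a minimal sketch of the dictionary form, which copies each source file to a chosen destination inside every grading environment (the file names are placeholders):

```python
from jupygrader import grade_notebooks

results = grade_notebooks(
    ["hw1_alice.ipynb", "hw1_bob.ipynb"],
    # Dictionary form: keys are source paths, values are destination
    # paths inside each grading environment.
    base_files={
        "shared/data.csv": "data.csv",
        "shared/helpers.py": "lib/helpers.py",
    },
)
```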

Returns:

| Type | Description |
| --- | --- |
| `List[GradedResult]` | List of `GradedResult` objects containing detailed results for each notebook. |

Raises:

| Type | Description |
| --- | --- |
| `TypeError` | If an element in `grading_items` has an unsupported type. |
| `ValueError` | If a required path doesn't exist or has invalid configuration. |
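
Both exceptions surface from the call itself, so a batch runner can fail fast with a clear message; a minimal sketch, assuming the documented behavior above (the `submissions/` directory is a placeholder):

```python
from pathlib import Path
from jupygrader import grade_notebooks

# Collect submissions from a hypothetical "submissions/" directory
submissions = sorted(Path("submissions").glob("*.ipynb"))

try:
    results = grade_notebooks(submissions)
except (TypeError, ValueError) as e:
    print(f"Grading aborted: {e}")
```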

Examples:

>>> # Grade multiple notebooks with default settings
>>> results = grade_notebooks(["student1.ipynb", "student2.ipynb"])
>>>
>>> # With custom configurations
>>> results = grade_notebooks([
...     GradingItem(notebook_path="student1.ipynb", output_path="results"),
...     GradingItem(notebook_path="student2.ipynb", output_path="results"),
... ], base_files=["data.csv", "helpers.py"], export_csv=True)
Source code in src/jupygrader/grader.py
def grade_notebooks(
    grading_items: List[Union[FilePath, GradingItem, dict]],
    *,
    base_files: Optional[Union[FilePath, List[FilePath], FileDict]] = None,
    verbose: bool = True,
    export_csv: bool = True,
    csv_output_path: Optional[FilePath] = None,
    regrade_existing: bool = False,
    execution_timeout: Optional[int] = 600,
) -> List[GradedResult]:
    """Grade multiple Jupyter notebooks with test cases.

    Processes a list of notebook grading items, executes each notebook in a clean
    environment, evaluates test cases, and produces graded outputs. Can handle both
    simple file paths and complex grading configurations.

    Args:
        grading_items: List of items to grade, which can be:
            - Strings or Path objects with paths to notebook files
            - GradingItem objects with detailed grading configuration
            - Dictionaries that can be converted to GradingItem objects
        base_files: Optional files to include in all grading environments. Can be:
            - A single file path (string or Path)
            - A list of file paths
            - A dictionary mapping source paths to destination paths
        verbose: Whether to print progress and diagnostic information. Defaults to True.
        export_csv: Whether to export results to CSV file. Defaults to True.
        csv_output_path: Optional path for the CSV export. If None, uses notebook
            output directories. Defaults to None.
        regrade_existing: Whether to regrade notebooks even if results already exist.
            Defaults to False.
        execution_timeout: Maximum time (in seconds) allowed for notebook execution.
            Set to None to disable the timeout. Defaults to 600 seconds.

    Returns:
        List of GradedResult objects containing detailed results for each notebook.

    Raises:
        TypeError: If an element in grading_items has an unsupported type.
        ValueError: If a required path doesn't exist or has invalid configuration.

    Examples:
        >>> # Grade multiple notebooks with default settings
        >>> results = grade_notebooks(["student1.ipynb", "student2.ipynb"])
        >>>
        >>> # With custom configurations
        >>> results = grade_notebooks([
        ...     GradingItem(notebook_path="student1.ipynb", output_path="results"),
        ...     GradingItem(notebook_path="student2.ipynb", output_path="results"),
        ... ], base_files=["data.csv", "helpers.py"], export_csv=True)
    """
    batch_config = BatchGradingConfig(
        base_files=base_files,
        verbose=verbose,
        export_csv=export_csv,
        csv_output_path=csv_output_path,
        regrade_existing=regrade_existing,
        execution_timeout=execution_timeout,
    )

    manager = BatchGradingManager(
        grading_items=grading_items, batch_config=batch_config
    )

    return manager.grade()

jupygrader.grade_single_notebook()

from jupygrader import grade_single_notebook

# Grade a single notebook by path
graded_result = grade_single_notebook('path/to/notebook.ipynb')

from jupygrader import grade_single_notebook

# Grade with detailed configuration
item = {
    "notebook_path": "path/to/notebook1.ipynb",
    "output_path": "path/to/output1",
    "copy_files": ["data1.csv"],
}
graded_result = grade_single_notebook(item)

Grade a single Jupyter notebook with test cases.

Executes a notebook in a clean environment, evaluates test cases, and produces graded outputs. A convenience wrapper around grade_notebooks() for single notebook grading.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `grading_item` | `Union[FilePath, GradingItem, dict]` | The notebook to grade: a string or `Path` pointing to a notebook file, a `GradingItem` object with detailed grading configuration, or a dictionary that can be converted to a `GradingItem`. | *required* |
| `**kwargs` | | Additional keyword arguments passed to `grade_notebooks()`: `base_files` (files to include in the grading environment), `verbose` (whether to print progress information), `regrade_existing` (whether to regrade if results exist), `csv_output_path` (path for CSV output, if needed), and `execution_timeout` (maximum time in seconds allowed for notebook execution; set to `None` to disable the timeout). | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `Optional[GradedResult]` | `GradedResult` object containing detailed results, or `None` if grading failed. |
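
Because a failed run yields `None` instead of raising, check the result before reading scores; a minimal sketch:

```python
from jupygrader import grade_single_notebook

result = grade_single_notebook("student1.ipynb")

if result is None:
    print("Grading failed; no result was produced.")
else:
    print(f"Score: {result.learner_autograded_score}/{result.max_total_score}")
```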

Raises:

| Type | Description |
| --- | --- |
| `TypeError` | If `grading_item` has an unsupported type. |
| `ValueError` | If a required path doesn't exist or has invalid configuration. |

Examples:

>>> # Grade a notebook with default settings
>>> result = grade_single_notebook("student1.ipynb")
>>> print(f"Score: {result.learner_autograded_score}/{result.max_total_score}")
>>>
>>> # With custom configuration
>>> result = grade_single_notebook(
...     GradingItem(
...         notebook_path="student1.ipynb",
...         output_path="results",
...         copy_files=["data.csv"]
...     ),
...     verbose=True
... )
Source code in src/jupygrader/grader.py
def grade_single_notebook(
    grading_item: Union[FilePath, GradingItem, dict],
    **kwargs,
) -> Optional[GradedResult]:
    """Grade a single Jupyter notebook with test cases.

    Executes a notebook in a clean environment, evaluates test cases, and produces
    graded outputs. A convenience wrapper around grade_notebooks() for single notebook grading.

    Args:
        grading_item: The notebook to grade, which can be:
            - A string or Path object with path to notebook file
            - A GradingItem object with detailed grading configuration
            - A dictionary that can be converted to a GradingItem object
        **kwargs: Additional keyword arguments passed to grade_notebooks():
            - base_files: Files to include in grading environment
            - verbose: Whether to print progress information
            - regrade_existing: Whether to regrade if results exist
            - csv_output_path: Path for CSV output (if needed)
            - execution_timeout: Maximum time (in seconds) allowed for notebook
              execution. Set to None to disable the timeout.

    Returns:
        GradedResult object containing detailed results, or None if grading failed.

    Raises:
        TypeError: If grading_item has an unsupported type.
        ValueError: If a required path doesn't exist or has invalid configuration.

    Examples:
        >>> # Grade a notebook with default settings
        >>> result = grade_single_notebook("student1.ipynb")
        >>> print(f"Score: {result.learner_autograded_score}/{result.max_total_score}")
        >>>
        >>> # With custom configuration
        >>> result = grade_single_notebook(
        ...     GradingItem(
        ...         notebook_path="student1.ipynb",
        ...         output_path="results",
        ...         copy_files=["data.csv"]
        ...     ),
        ...     verbose=True
        ... )
    """
    kwargs["export_csv"] = False

    r = grade_notebooks([grading_item], **kwargs)

    return r[0] if len(r) > 0 else None

📦 @dataclasses


jupygrader.GradedResult

Complete results of grading a Jupyter notebook.

This comprehensive class stores all information related to grading a notebook, including scores, test case results, execution environment details, and file paths for generated outputs.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `filename` | `str` | Name of the graded notebook file. | `''` |
| `learner_autograded_score` | `Union[int, float]` | Points earned from automatically graded test cases. | `0` |
| `max_autograded_score` | `Union[int, float]` | Maximum possible points from automatically graded test cases. | `0` |
| `max_manually_graded_score` | `Union[int, float]` | Maximum possible points from manually graded test cases. | `0` |
| `max_total_score` | `Union[int, float]` | Total maximum possible points across all test cases. | `0` |
| `num_autograded_cases` | `int` | Number of automatically graded test cases. | `0` |
| `num_passed_cases` | `int` | Number of passed test cases. | `0` |
| `num_failed_cases` | `int` | Number of failed test cases. | `0` |
| `num_manually_graded_cases` | `int` | Number of test cases requiring manual grading. | `0` |
| `num_total_test_cases` | `int` | Total number of test cases in the notebook. | `0` |
| `grading_finished_at` | `str` | Timestamp when grading completed. | `''` |
| `grading_duration_in_seconds` | `float` | Time taken to complete grading. | `0.0` |
| `test_case_results` | `List[TestCaseResult]` | Detailed results for each individual test case. | `list()` |
| `submission_notebook_hash` | `str` | MD5 hash of the submitted notebook file. | `''` |
| `test_cases_hash` | `str` | MD5 hash of the test case code in the notebook. | `''` |
| `grader_python_version` | `str` | Python version used for grading. | `''` |
| `grader_platform` | `str` | Platform information where grading occurred. | `''` |
| `jupygrader_version` | `str` | Version of Jupygrader used. | `''` |
| `extracted_user_code_file` | `Optional[str]` | Path to the file containing extracted user code. | `None` |
| `graded_html_file` | `Optional[str]` | Path to the HTML output of the graded notebook. | `None` |
| `text_summary_file` | `Optional[str]` | Path to the text summary file. | `None` |
| `graded_result_json_file` | `Optional[str]` | Path to the JSON file containing the graded results. | `None` |
Source code in src/jupygrader/models/grading_dataclasses.py
@dataclass
class GradedResult:
    """Complete results of grading a Jupyter notebook.

    This comprehensive class stores all information related to grading a notebook,
    including scores, test case results, execution environment details, and file paths
    for generated outputs.

    Args:
        filename: Name of the graded notebook file. Defaults to "".
        learner_autograded_score: Points earned from automatically graded test cases. Defaults to 0.
        max_autograded_score: Maximum possible points from automatically graded test cases. Defaults to 0.
        max_manually_graded_score: Maximum possible points from manually graded test cases. Defaults to 0.
        max_total_score: Total maximum possible points across all test cases. Defaults to 0.
        num_autograded_cases: Number of automatically graded test cases. Defaults to 0.
        num_passed_cases: Number of passed test cases. Defaults to 0.
        num_failed_cases: Number of failed test cases. Defaults to 0.
        num_manually_graded_cases: Number of test cases requiring manual grading. Defaults to 0.
        num_total_test_cases: Total number of test cases in the notebook. Defaults to 0.
        grading_finished_at: Timestamp when grading completed. Defaults to "".
        grading_duration_in_seconds: Time taken to complete grading. Defaults to 0.0.
        test_case_results: Detailed results for each individual test case. Defaults to empty list.
        submission_notebook_hash: MD5 hash of the submitted notebook file. Defaults to "".
        test_cases_hash: MD5 hash of test case code in the notebook. Defaults to "".
        grader_python_version: Python version used for grading. Defaults to "".
        grader_platform: Platform information where grading occurred. Defaults to "".
        jupygrader_version: Version of Jupygrader used. Defaults to "".
        extracted_user_code_file: Path to file containing extracted user code. Defaults to None.
        graded_html_file: Path to HTML output of graded notebook. Defaults to None.
        text_summary_file: Path to text summary file. Defaults to None.
        graded_result_json_file: Path to JSON file containing the graded results. Defaults to None.
    """

    filename: str = ""
    learner_autograded_score: Union[int, float] = 0
    max_autograded_score: Union[int, float] = 0
    max_manually_graded_score: Union[int, float] = 0
    max_total_score: Union[int, float] = 0
    num_autograded_cases: int = 0
    num_passed_cases: int = 0
    num_failed_cases: int = 0
    num_manually_graded_cases: int = 0
    num_total_test_cases: int = 0
    grading_finished_at: str = ""
    grading_duration_in_seconds: float = 0.0
    test_case_results: List[TestCaseResult] = field(default_factory=list)
    submission_notebook_hash: str = ""
    test_cases_hash: str = ""
    grader_python_version: str = ""
    grader_platform: str = ""
    jupygrader_version: str = ""
    extracted_user_code_file: Optional[str] = None
    graded_html_file: Optional[str] = None
    text_summary_file: Optional[str] = None
    graded_result_json_file: Optional[str] = None

    @property
    def text_summary(self) -> str:
        summary_parts = [
            f"File: {self.filename}",
            f"Autograded Score: {self.learner_autograded_score} out of {self.max_autograded_score}",
            f"Passed {self.num_passed_cases} out of {self.num_autograded_cases} test cases",
        ]

        if self.num_manually_graded_cases > 0:
            summary_parts.extend(
                [
                    f"{self.num_manually_graded_cases} items will be graded manually.",
                    f"{self.max_manually_graded_score} points are available for manually graded items.",
                    f"{self.max_total_score} total points are available.",
                ]
            )

        summary_parts.append(
            f"Grading took {self.grading_duration_in_seconds:.2f} seconds\n"
        )
        summary_parts.append("Test Case Summary")

        for test_case in self.test_case_results:
            summary_parts.append("-----------------")

            if test_case.grade_manually:
                summary_parts.append(
                    f"{test_case.test_case_name}: requires manual grading, {test_case.available_points} points available"
                )
            else:
                summary_parts.append(
                    f"{test_case.test_case_name}: {'PASS' if test_case.did_pass else 'FAIL'}, {test_case.points} out of {test_case.available_points} points"
                )

                if not test_case.did_pass:
                    summary_parts.extend(
                        ["\n[Autograder Output]", f"{test_case.message}"]
                    )

        return "\n".join(summary_parts)

    @classmethod
    def from_dict(cls, data: dict) -> "GradedResult":
        # Copy the dictionary to avoid modifying the original
        data_copy = data.copy()

        # Remove 'text_summary' if present in the data since it's now a computed property
        if "text_summary" in data_copy:
            del data_copy["text_summary"]

        # Process test_case_results
        test_case_results = [
            TestCaseResult(**item) for item in data_copy.get("test_case_results", [])
        ]
        data_copy["test_case_results"] = test_case_results
        return cls(**data_copy)

    def to_dict(self) -> dict:
        result_dict = asdict(self)

        # Add the computed text_summary to the dictionary
        result_dict["text_summary"] = self.text_summary

        return result_dict
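
A short sketch of consuming a `GradedResult` outside the grader: rebuild one from the JSON file the grader writes (the path here is a placeholder), inspect individual test cases, and round-trip it back to a dictionary.

```python
import json
from jupygrader import GradedResult

# Load a previously exported result ("graded_result.json" is a placeholder path)
with open("graded_result.json", encoding="utf-8") as f:
    result = GradedResult.from_dict(json.load(f))

# Inspect individual test cases
for tc in result.test_case_results:
    status = "manual" if tc.grade_manually else ("PASS" if tc.did_pass else "FAIL")
    print(f"{tc.test_case_name}: {status} ({tc.points}/{tc.available_points})")

# text_summary is a computed property; to_dict() includes it in the output
print(result.text_summary)
exported = result.to_dict()
```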

jupygrader.TestCaseResult

Result of an individual test case execution in a notebook.

This class stores the outcome of executing a test case during grading, including the points awarded, whether the test passed, and any output messages generated during execution.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `test_case_name` | `str` | Unique identifier for the test case. | `''` |
| `points` | `Union[int, float]` | Points awarded for this test case (0 if failed). | `0` |
| `available_points` | `Union[int, float]` | Maximum possible points for this test case. | `0` |
| `did_pass` | `Optional[bool]` | Whether the test case passed (`True`), failed (`False`), or requires manual grading (`None`). | `None` |
| `grade_manually` | `bool` | Whether this test case should be graded manually. | `False` |
| `message` | `str` | Output message from the test execution; typically contains error information if the test failed. | `''` |
Source code in src/jupygrader/models/grading_dataclasses.py
@dataclass
class TestCaseResult:
    """Result of an individual test case execution in a notebook.

    This class stores the outcome of executing a test case during grading,
    including the points awarded, whether the test passed, and any output
    messages generated during execution.

    Args:
        test_case_name: Unique identifier for the test case. Defaults to "".
        points: Points awarded for this test case (0 if failed). Defaults to 0.
        available_points: Maximum possible points for this test case. Defaults to 0.
        did_pass: Whether the test case passed (True), failed (False),
            or requires manual grading (None). Defaults to None.
        grade_manually: Whether this test case should be graded manually. Defaults to False.
        message: Output message from the test execution, typically contains
            error information if the test failed. Defaults to "".
    """

    test_case_name: str = ""
    points: Union[int, float] = 0
    available_points: Union[int, float] = 0
    did_pass: Optional[bool] = None  # Can be True, False, or None
    grade_manually: bool = False
    message: str = ""


📌 Notebook Operations


jupygrader.extract_test_case_metadata_from_code()

Extract test case metadata from a code cell string.

Parses a code string to extract test case metadata including the test case name, points value, and whether it requires manual grading. The function looks for specific patterns in the code:

  • _test_case = 'name' (required)
  • _points = value (optional, defaults to 0)
  • _grade_manually = True/False (optional, defaults to False)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `code_str` | `str` | The source code string to parse for test case metadata. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `Optional[TestCaseMetadata]` | A `TestCaseMetadata` object with the extracted values if a test case is found, or `None` if no test case is found. |
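
A minimal sketch of parsing a single cell's source, assuming the marker conventions listed above (the cell content is made up):

```python
from jupygrader import extract_test_case_metadata_from_code

cell_source = """\
_test_case = 'exercise_01'
_points = 2

assert my_answer == 42
"""

metadata = extract_test_case_metadata_from_code(cell_source)
if metadata is not None:
    print(metadata.test_case_name)  # exercise_01
    print(metadata.points)          # 2.0 (points are parsed as float)
    print(metadata.grade_manually)  # False (default)
```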

Source code in src/jupygrader/notebook_operations.py
def extract_test_case_metadata_from_code(code_str: str) -> Optional[TestCaseMetadata]:
    """Extract test case metadata from a code cell string.

    Parses a code string to extract test case metadata including the test case name,
    points value, and whether it requires manual grading. The function looks for
    specific patterns in the code:

    - `_test_case = 'name'`  (required)
    - `_points = value`      (optional, defaults to 0)
    - `_grade_manually = True/False`  (optional, defaults to `False`)

    Args:
        code_str: The source code string to parse for test case metadata

    Returns:
        A TestCaseMetadata object with extracted values if a test case is found,
        None if a test case is not found
    """
    tc_result = re.search(test_case_name_pattern, code_str, flags=re.MULTILINE)

    if not tc_result or len(tc_result.groups()) == 0:
        return None

    metadata = TestCaseMetadata(
        test_case_name=tc_result.groups()[0],
        points=0,
        grade_manually=False,
    )

    points_result = re.search(test_case_points_pattern, code_str, flags=re.MULTILINE)

    # if the test case code cell does not include _points
    # no points will be assigned (default of zero)
    if points_result and len(points_result.groups()) > 0:
        metadata.points = float(points_result.groups()[0])

    manual_grading_result = re.search(
        manual_grading_pattern, code_str, flags=re.MULTILINE
    )

    if manual_grading_result and len(manual_grading_result.groups()) > 0:
        metadata.grade_manually = bool(manual_grading_result.groups()[0])

    return metadata

jupygrader.extract_test_cases_metadata_from_notebook()

Extract metadata from all test cases in a notebook.

Iterates through all code cells in the notebook and identifies test case cells by looking for specific pattern markers. For each test case found, extracts the metadata into a TestCaseMetadata object.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `nb` | `NotebookNode` | The notebook to extract test case metadata from. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `List[TestCaseMetadata]` | A list of `TestCaseMetadata` objects for all test cases found in the notebook. |
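
A minimal sketch of summarizing a notebook's test cases, assuming a local file read with nbformat ("assignment.ipynb" is a placeholder):

```python
import nbformat
from jupygrader import extract_test_cases_metadata_from_notebook

nb = nbformat.read("assignment.ipynb", as_version=4)

for metadata in extract_test_cases_metadata_from_notebook(nb):
    print(f"{metadata.test_case_name}: {metadata.points} points")
```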

Source code in src/jupygrader/notebook_operations.py
def extract_test_cases_metadata_from_notebook(
    nb: NotebookNode,
) -> List[TestCaseMetadata]:
    """Extract metadata from all test cases in a notebook.

    Iterates through all code cells in the notebook and identifies test case cells
    by looking for specific pattern markers. For each test case found, extracts
    the metadata into a `TestCaseMetadata` object.

    Args:
        nb: The notebook to extract test case metadata from

    Returns:
        A list of TestCaseMetadata objects for all test cases found in the notebook
    """
    metadata_list: List[TestCaseMetadata] = []

    for cell in nb.cells:
        if cell.cell_type == "code":
            test_case_metadata = extract_test_case_metadata_from_code(cell.source)

            if test_case_metadata:
                metadata_list.append(test_case_metadata)

    return metadata_list

jupygrader.does_cell_contain_test_case()

Determine if a notebook cell contains a test case.

A cell is considered a test case if it contains the pattern '_test_case = "name"'. This function uses a regular expression to check for this pattern.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `cell` | `NotebookNode` | The notebook cell to check. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `bool` | `True` if the cell contains a test case pattern, `False` otherwise. |

Source code in src/jupygrader/notebook_operations.py
def does_cell_contain_test_case(cell: NotebookNode) -> bool:
    """Determine if a notebook cell contains a test case.

    A cell is considered a test case if it contains the pattern '_test_case = "name"'.
    This function uses a regular expression to check for this pattern.

    Args:
        cell: The notebook cell to check

    Returns:
        True if the cell contains a test case pattern, False otherwise
    """
    search_result = re.search(test_case_name_pattern, cell.source, flags=re.MULTILINE)

    return search_result is not None and len(search_result.groups()) > 0

jupygrader.is_manually_graded_test_case()

Determine if a notebook cell contains a manually graded test case.

A test case is considered manually graded if it contains the pattern '_grade_manually = True'. This function checks for this specific pattern in the cell's source code.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `cell` | `NotebookNode` | The notebook cell to check. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `bool` | `True` if the cell is a manually graded test case, `False` otherwise. |
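
The two predicates above compose naturally when scanning a notebook; a minimal sketch, assuming a local file read with nbformat ("assignment.ipynb" is a placeholder):

```python
import nbformat
from jupygrader import (
    does_cell_contain_test_case,
    is_manually_graded_test_case,
)

nb = nbformat.read("assignment.ipynb", as_version=4)

for cell in nb.cells:
    if cell.cell_type == "code" and does_cell_contain_test_case(cell):
        kind = "manual" if is_manually_graded_test_case(cell) else "auto"
        print(f"{kind} test case: {cell.source.splitlines()[0]}")
```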

Source code in src/jupygrader/notebook_operations.py
def is_manually_graded_test_case(cell: NotebookNode) -> bool:
    """Determine if a notebook cell contains a manually graded test case.

    A test case is considered manually graded if it contains the pattern
    '_grade_manually = True'. This function checks for this specific pattern
    in the cell's source code.

    Args:
        cell: The notebook cell to check

    Returns:
        True if the cell is a manually graded test case, False otherwise
    """
    search_result = re.search(manual_grading_pattern, cell.source, flags=re.MULTILINE)

    return search_result is not None and len(search_result.groups()) > 0

jupygrader.extract_user_code_from_notebook()

Extract user code from a notebook.

Collects all code from non-test-case code cells in the notebook.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `nb` | `NotebookNode` | The notebook to extract code from. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `str` | String containing all user code concatenated with newlines. |
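
A minimal sketch of saving the extracted code for review or similarity checks (file names are placeholders):

```python
import nbformat
from jupygrader import extract_user_code_from_notebook

nb = nbformat.read("submission.ipynb", as_version=4)
user_code = extract_user_code_from_notebook(nb)

# Write the concatenated user code out as a plain Python file
with open("submission_user_code.py", "w", encoding="utf-8") as f:
    f.write(user_code)
```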

Source code in src/jupygrader/notebook_operations.py
def extract_user_code_from_notebook(nb: NotebookNode) -> str:
    """Extract user code from a notebook.

    Collects all code from non-test-case code cells in the notebook.

    Args:
        nb: The notebook to extract code from

    Returns:
        String containing all user code concatenated with newlines
    """
    full_code = ""

    for cell in nb.cells:
        if (
            (cell.cell_type == "code")
            and not does_cell_contain_test_case(cell)
            and cell.source
        ):
            full_code += cell.source + "\n\n"

    return full_code

jupygrader.remove_code_cells_that_contain()

Remove code cells whose source contains any of the given search strings. Accepts a single search string or a list of strings and returns the notebook with all matching code cells removed.

Source code in src/jupygrader/notebook_operations.py
def remove_code_cells_that_contain(
    nb: NotebookNode, search_str: Union[str, List[str]]
) -> NotebookNode:
    if isinstance(search_str, str):
        search_list = [search_str]
    else:
        search_list = search_str

    nb.cells = [
        cell
        for cell in nb.cells
        if not (cell.cell_type == "code" and any(s in cell.source for s in search_list))
    ]
    return nb
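
A minimal usage sketch: stripping solution cells from a notebook before distributing it, assuming a marker convention of your own choosing (the markers and file names are hypothetical):

```python
import nbformat
from jupygrader import remove_code_cells_that_contain

nb = nbformat.read("instructor_copy.ipynb", as_version=4)

# Drop every code cell containing either marker
nb = remove_code_cells_that_contain(nb, ["# SOLUTION", "# HIDDEN"])

nbformat.write(nb, "student_copy.ipynb")
```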

jupygrader.remove_comments()

Remove comments from Python source code.

Removes both single-line comments (starting with #) and multi-line comments (/* ... */), while preserving strings.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `source` | `str` | Python source code as a string. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `str` | Source code with comments removed. |
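
A small sketch of the behavior: comment text is stripped, while string literals that merely look like comments are preserved (any trailing whitespace left by a removed comment is kept):

```python
from jupygrader import remove_comments

code = "x = 1  # set x\ns = '# not a comment'\n"

print(remove_comments(code))
# x = 1
# s = '# not a comment'
```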

Source code in src/jupygrader/notebook_operations.py
def remove_comments(source: str) -> str:
    """Remove comments from Python source code.

    Removes both single line comments (starting with #) and
    multi-line comments (/* ... */), while preserving strings.

    Args:
        source: Python source code as string

    Returns:
        Source code with comments removed
    """
    pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|#[^\r\n]*$)"
    # first group captures quoted strings (double or single)
    # second group captures comments (# single-line or /* multi-line */)
    regex = re.compile(pattern, re.MULTILINE | re.DOTALL)

    def _replacer(match):
        # if the 2nd group (capturing comments) is not None,
        # it means we have captured a non-quoted (real) comment string.
        if match.group(2) is not None:
            return ""  # so we will return empty to remove the comment
        else:  # otherwise, we will return the 1st group
            return match.group(1)  # captured quoted-string

    return regex.sub(_replacer, source)

jupygrader.get_test_cases_hash()

Generate a hash of all test cases in a notebook.

Creates a standardized representation of all test case cells by removing comments and formatting with Black, then generates an MD5 hash.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `nb` | `NotebookNode` | The notebook to generate a hash for. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `str` | MD5 hash string representing the test cases. |
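
Because comments and formatting are normalized before hashing, the hash changes only when test-case logic changes; a minimal sketch for detecting tampered test cases (file names are placeholders):

```python
import nbformat
from jupygrader import get_test_cases_hash

template = nbformat.read("assignment_template.ipynb", as_version=4)
submission = nbformat.read("student_submission.ipynb", as_version=4)

if get_test_cases_hash(submission) != get_test_cases_hash(template):
    print("Warning: the submission's test cases differ from the template.")
```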

Source code in src/jupygrader/notebook_operations.py
def get_test_cases_hash(nb: NotebookNode) -> str:
    """Generate a hash of all test cases in a notebook.

    Creates a standardized representation of all test case cells by
    removing comments and formatting with Black, then generates an MD5 hash.

    Args:
        nb: The notebook to generate a hash for

    Returns:
        MD5 hash string representing the test cases
    """
    test_cases_code = ""

    for cell in nb.cells:
        if (cell.cell_type == "code") and does_cell_contain_test_case(cell):
            # standardize code before hashing
            # by removing comments and formatting the code using the Black formatter
            standardized_code = remove_comments(cell.source)
            standardized_code = black.format_str(standardized_code, mode=black.Mode())

            # concatenate to test_cases_code
            test_cases_code += standardized_code

    # generate an MD5 hash
    hash_str = hashlib.md5(test_cases_code.encode("utf-8")).hexdigest()
    return hash_str