diff --git a/.gitignore b/.gitignore index 463236b..d22cb1c 100644 --- a/.gitignore +++ b/.gitignore @@ -171,3 +171,5 @@ logs/* .ruff_cache/ engibench_studies/* +workshops/dcc26/artifacts/* +workshops/dcc26/optional_artifacts/* diff --git a/README.md b/README.md index 6b828b5..0a793fc 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,21 @@ We have some colab notebooks that show how to use some of the EngiBench/EngiOpt * [Example easy model (GAN)](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/main/example_easy_model.ipynb) * [Example hard model (Diffusion)](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/main/example_hard_model.ipynb) +## Workshop notebooks + +For the DCC'26 hands-on tutorial flow, see: + +- `workshops/dcc26/participant/00_setup_api_warmup.ipynb` +- `workshops/dcc26/participant/01_train_generate.ipynb` +- `workshops/dcc26/participant/02_evaluate_metrics.ipynb` +- `workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb` + +Facilitator solutions are in: + +- `workshops/dcc26/solutions/` + +See `workshops/dcc26/README.md` for agenda mapping, fallback path, and artifact outputs. 
+ ## Citing diff --git a/pyproject.toml b/pyproject.toml index 36881c4..f267845 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,6 +113,10 @@ target-version = "py39" ######################################## LINTING ######################################## [tool.ruff.lint] select = ["ALL"] +exclude = [ + "workshops/dcc26/**/*.ipynb", + "workshops/dcc26/utils/**/*.py", +] ignore = [ "ANN", # flake8-annotations (mypy's job) "COM812", # missing-trailing-comma (conflicts with formatter) diff --git a/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md b/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md new file mode 100644 index 0000000..19d6299 --- /dev/null +++ b/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md @@ -0,0 +1,131 @@ +# DCC26 Notebook Pedagogy Blueprint (Pre-write Source) + +This document is the canonical pre-write for workshop notebooks. Notebooks should be generated from this structure, not authored directly as raw `.ipynb` first. + +## Teaching Design Principles + +1. Every technical step is paired with a markdown teaching cell. +2. Every code section has local context: why, inputs, outputs, checks, failure modes. +3. Benchmark science is explicit: objective, feasibility, diversity, novelty, reproducibility. +4. Discussion prompts are embedded and mapped to workshop breakout questions. +5. Participant and solution tracks share the same pedagogical arc; only implementation detail differs. + +## Common Cell Pattern + +For each section: + +- Purpose: why this step matters for benchmark credibility +- Inputs: what artifacts/variables are required +- Action: code operation performed +- Success check: what output indicates correctness +- Failure modes: common pitfalls and fixes +- Discussion bridge: one reflection question + +--- + +## Notebook 00: Setup + API Warmup + +### Learning objective +Understand EngiBench benchmark contract components and reproducibility controls. + +### Section plan +1. Read-me-first + copy mode + runtime expectation +2. 
Concept cell: EngiBench vs model libraries +3. Environment bootstrap +4. Reproducibility cell (seed, versions) +5. Problem instantiation (`Beams2D`) + inspection +6. Dataset inspection and shape sanity +7. Render one sample and explain representation +8. Explicit constraint violation check with interpretation +9. Reflection prompts tied to comparability across papers + +### Discussion trigger +Which benchmark settings must be fixed for fair method comparison? + +--- + +## Notebook 01: Train + Generate + +### Learning objective +Implement an EngiOpt model against EngiBench data while preserving evaluation-ready artifacts. + +### Section plan +1. Read-me-first + copy mode + expected runtime +2. Concept cell: inverse design framing, conditional generation assumptions +3. Bootstrap deps and imports +4. Configuration and artifact contract +5. Data subset construction and rationale (runtime vs fidelity) +6. Model definition and optimizer +7. Training loop with diagnostics + expected loss behavior +8. Generation from test conditions +9. Quick feasibility precheck (not final evaluation) +10. Artifact export contract (npy/json/checkpoint/history/curve) +11. Optional W&B logging: train curve, scalar logs, artifact bundle +12. Visual sanity grid +13. Discussion prompt: training loss vs engineering validity mismatch + +### Discussion trigger +Can lower train reconstruction loss worsen simulator objective or feasibility? + +--- + +## Notebook 02: Evaluate + Metrics + +### Learning objective +Run robust benchmark evaluation and interpret trade-offs beyond objective score. + +### Section plan +1. Read-me-first + copy mode + expected runtime +2. Concept cell: why objective-only reporting is incomplete +3. Bootstrap deps and imports +4. Artifact loading strategy (local -> optional W&B -> local auto-build) +5. Per-sample evaluation loop (constraint + simulate) +6. 
Metric layer: + - objective means and gap + - improvement rate + - feasibility/violation rates + - diversity proxy + - novelty-to-train proxy +7. Export layer: CSV + histogram + scatter + grid +8. Optional W&B evaluation logging (table + images + summary) +9. Interpretation rubric with examples +10. Breakout prompts mapped to workshop proposal + +### Discussion trigger +Which missing metric would change conclusions for your domain? + +--- + +## Notebook 03: Add New Problem Scaffold + +### Learning objective +Understand minimal interface required for a reusable EngiBench-style benchmark problem. + +### Section plan +1. Read-me-first + copy mode +2. Concept cell: benchmark-ready problem checklist +3. Scaffold imports and abstract contract explanation +4. Minimal `Problem` implementation skeleton +5. Toy simulator and constraints +6. Registration/discovery and deterministic behavior +7. Contribution checklist for real domains +8. Reflection prompts on leakage, units, and reproducibility metadata + +### Discussion trigger +What metadata is minimally required so another lab can reproduce your new benchmark? + +--- + +## Participant vs Solution Policy + +- Participant notebooks: keep code TODOs, but each TODO has explicit completion checks and expected outputs. +- Solution notebooks: complete implementations plus concise inline comments for non-obvious logic only. +- Both tracks: keep identical markdown structure for pedagogical alignment. + +## Quality Gate Before Publishing + +1. All code cells compile. +2. Solution Notebook 01+02 execute end-to-end in workshop env. +3. Artifact contract is consistent between Notebook 01 and 02. +4. Copy-safe links use `#copy=true`. +5. Standalone readability check: each notebook understandable without live lecture. 
diff --git a/workshops/dcc26/README.md b/workshops/dcc26/README.md new file mode 100644 index 0000000..245f9f4 --- /dev/null +++ b/workshops/dcc26/README.md @@ -0,0 +1,104 @@ +# DCC 2026 Workshop Notebook Suite + +This folder contains the DCC'26 hands-on notebook suite for benchmarking AI methods in engineering design with EngiBench and EngiOpt. + +It is split into two tracks: + +- `participant/`: notebooks with guided `PUBLIC FILL-IN` cells for attendees +- `solutions/`: fully completed facilitator notebooks + +## Workshop flow (3.5h) + +- `participant/00_setup_api_warmup.ipynb` and `solutions/00_setup_api_warmup.ipynb` (10-15 min) + - Environment setup + - Problem + dataset inspection + - Rendering and constraint checks + +- `participant/01_train_generate.ipynb` and `solutions/01_train_generate.ipynb` (30 min) + - Lightweight training using `engiopt.cgan_2d.Generator` + - Deterministic seeds + - Artifact export for downstream evaluation (runtime/W&B optional transport) + +- `participant/02_evaluate_metrics.ipynb` and `solutions/02_evaluate_metrics.ipynb` (20 min) + - Constraint validation + - Physics simulation + - Baseline comparison + - Metric and artifact export + +- `participant/03_add_new_problem_scaffold.ipynb` and `solutions/03_add_new_problem_scaffold.ipynb` (25 min) + - Ambitious `Problem` scaffold (`PlanarManipulatorCoDesignProblem`, not currently in EngiBench) + - PyBullet-based robotics co-design simulation and optimization loop + - Mapping to contribution docs + +## Runtime assumptions + +- Primary live problem: `Beams2D` +- No container-dependent problems are required during workshop exercises +- W&B integration is optional and disabled by default + +## Colab setup + +Use `requirements-colab.txt` only as a local convenience snapshot. +The notebook bootstrap cells are the runtime source of truth for Colab. + +All notebooks now include a conditional dependency bootstrap cell: + +- On Colab: installs required packages automatically. 
+- On local envs: skips install by default (`FORCE_INSTALL = False`). +- Note: notebooks that use EngiOpt install it from the EngiOpt GitHub branch bootstrap. + +## Open in Colab + +Use these `?copy=true` links for workshop sharing so attendees are prompted to create their own Drive copy first. + +- Participant 00: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/00_setup_api_warmup.ipynb?copy=true +- Participant 01: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/01_train_generate.ipynb?copy=true +- Participant 02: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/02_evaluate_metrics.ipynb?copy=true +- Participant 03: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb?copy=true +- Solution 00: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/00_setup_api_warmup.ipynb?copy=true +- Solution 01: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/01_train_generate.ipynb?copy=true +- Solution 02: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/02_evaluate_metrics.ipynb?copy=true +- Solution 03: https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb?copy=true + +## Output artifacts + +By default, solution notebooks write generated artifacts to: + +- Local/Jupyter: `workshops/dcc26/artifacts/` +- Google Colab runtime: `/content/dcc26_artifacts/` (no Google Drive permission needed) + +Optional: + +- You can enable W&B artifact upload/download in 
Notebook 01/02 by setting `USE_WANDB_ARTIFACTS = True`. +- Notebook 01 logs training dynamics (`train/loss`) and can upload checkpoint/history/plots as artifact payload. +- Notebook 02 can log evaluation metrics, tables, and figures to W&B. +- W&B is disabled by default so participants can run without account setup or API keys. +- Notebook 02 auto-builds Notebook 01-style artifacts locally with EngiOpt if they are missing (`AUTO_BUILD_ARTIFACTS_IF_MISSING = True`). + +These include: + +- `generated_designs.npy` +- `baseline_designs.npy` +- `conditions.json` +- `engiopt_cgan2d_generator_supervised.pt` +- `training_history.csv` +- `training_curve.png` +- `metrics_summary.csv` +- `objective_histogram.png` +- `objective_scatter.png` +- `design_grid.png` + +## Facilitator fallback policy + +If runtime is constrained: + +1. Skip Notebook 01 and run `02_evaluate_metrics.ipynb`; it can build required artifacts automatically. +2. Or reuse a previously saved checkpoint/artifact set from W&B or local runtime files. +3. Keep `03_add_new_problem_scaffold.ipynb` as the capstone for extensibility. + +## Suggested pre-workshop checks + +1. Run all notebooks once in fresh Colab runtime. +2. Confirm dataset download succeeds. +3. Confirm artifacts are generated in the expected folder. +4. Confirm no cell requires W&B auth unless explicitly enabled. 
diff --git a/workshops/dcc26/assets/engibench_logo.png b/workshops/dcc26/assets/engibench_logo.png new file mode 100644 index 0000000..6c1f17c Binary files /dev/null and b/workshops/dcc26/assets/engibench_logo.png differ diff --git a/workshops/dcc26/assets/engibench_problems.png b/workshops/dcc26/assets/engibench_problems.png new file mode 100644 index 0000000..4417381 Binary files /dev/null and b/workshops/dcc26/assets/engibench_problems.png differ diff --git a/workshops/dcc26/participant/00_setup_api_warmup.ipynb b/workshops/dcc26/participant/00_setup_api_warmup.ipynb new file mode 100644 index 0000000..11a770c --- /dev/null +++ b/workshops/dcc26/participant/00_setup_api_warmup.ipynb @@ -0,0 +1,494 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Welcome to the DCC'26 EngiBench Workshop!\n", + "\n", + "In the next 20 minutes you will **load an engineering-design benchmark, explore its data, and break its constraints on purpose**. No ML required yet — just Python and curiosity.\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"seaborn\", \"ipywidgets\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is EngiBench?\n", + "\n", + "\n", + "\n", + "EngiBench is an **open benchmark suite for engineering design** with ML. Three things it gives you:\n", + "\n", + "- **Standardised problems** — beams, heat sinks, photonic crystals, and more, each with the same Python API\n", + "- **Ready-made datasets** — thousands of optimal designs with their operating conditions, hosted on HuggingFace\n", + "- **Built-in evaluation** — constraint checking, simulation, and metrics so results are comparable across papers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exercise legend\n", + "\n", + "| Marker | Meaning |\n", + "|---|---|\n", + "| `PUBLIC FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n", + "| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1 — Choose a problem and set up\n", + "\n", + "EngiBench has many problems, all with the **same API**. 
Pick one by name from the list below.\n", + "\n", + "**Your task:** set `PROBLEM_ID` to one of the available problem strings (we recommend `\"beams2d\"` for this workshop)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-A\n", + "import importlib\n", + "import random, sys, os\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Import workshop helpers (all visualization code lives here)\n", + "if \"google.colab\" in sys.modules:\n", + " import subprocess\n", + " _utils = \"/content/workshop_utils\"\n", + " os.makedirs(_utils, exist_ok=True)\n", + " _branch = \"codex/dcc26-workshop-notebooks\"\n", + " _base = f\"https://raw.githubusercontent.com/IDEALLab/EngiOpt/{_branch}/workshops/dcc26/utils\"\n", + " for _f in (\"notebook_helpers.py\", \"__init__.py\"):\n", + " if not os.path.exists(f\"{_utils}/{_f}\"):\n", + " subprocess.check_call([\"wget\", \"-q\", f\"{_base}/{_f}\", \"-O\", f\"{_utils}/{_f}\"])\n", + "else:\n", + " _utils = os.path.abspath(\"../utils\") if os.path.isdir(\"../utils\") else \"workshops/dcc26/utils\"\n", + "sys.path.insert(0, _utils)\n", + "import notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers) # always pick up latest edits\n", + "from notebook_helpers import * # noqa: F401,F403\n", + "\n", + "import engibench\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "print(\"Available problems:\", list(BUILTIN_PROBLEMS.keys()))\n", + "\n", + "SEED = 7\n", + "set_global_seed(SEED)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "PROBLEM_ID = None # Pick a problem! Example: \"beams2d\"\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if PROBLEM_ID is None:\n", + " raise RuntimeError('Set PROBLEM_ID to a problem name, e.g. 
PROBLEM_ID = \"beams2d\"')\n", + "\n", + "# CHECKPOINT\n", + "assert PROBLEM_ID in BUILTIN_PROBLEMS, f'\"{PROBLEM_ID}\" not found. Choose from: {list(BUILTIN_PROBLEMS.keys())}'\n", + "print(f\"\\u2705 Checkpoint passed — using problem: {PROBLEM_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 2 — Instantiate the problem\n", + "\n", + "One line. Every EngiBench problem uses the same constructor — just pass a seed for reproducibility." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\n", + "print(\"Problem class:\", type(problem).__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 3 — Explore the API contract\n", + "\n", + "Every EngiBench problem exposes the **same fields**. This is what makes the benchmark fair — algorithms can only change the *method*, not the *problem definition*." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Problem class: \", type(problem).__name__)\n", + "print(\"Design space: \", problem.design_space)\n", + "print(\"Design shape: \", problem.design_space.shape)\n", + "print(\"Objectives: \", problem.objectives)\n", + "print(\"Condition keys: \", problem.conditions_keys)\n", + "print(\"Dataset ID: \", problem.dataset_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Key takeaway:** `design_space`, `objectives`, and `conditions_keys` are the **contract**. Any method you build must respect them." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 4 — Load and inspect a dataset sample\n", + "\n", + "The dataset lives on HuggingFace and downloads automatically. Your job:\n", + "1. Grab one training sample's **design** (a 2D numpy array)\n", + "2. 
Build a **config** dict mapping each condition key to the sample's value\n", + "\n", + "We give you the dataset loading — you extract the fields." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-B\n", + "# Goal: extract a design array and build a config dict from one training sample.\n", + "\n", + "dataset = problem.dataset # <-- this is provided for you\n", + "print(dataset) # inspect the splits and columns\n", + "\n", + "sample_idx = 0\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# The training split is: dataset[\"train\"]\n", + "# Each column can be indexed like: dataset[\"train\"][\"optimal_design\"][sample_idx]\n", + "#\n", + "# 1) Extract the design as a numpy array:\n", + "# design = np.array(dataset[\"train\"][\"optimal_design\"][sample_idx])\n", + "#\n", + "# 2) Build a config dict with one entry per condition key (use np.asarray to\n", + "# ensure array conditions are numpy arrays, not Python lists):\n", + "# config = {k: np.asarray(dataset[\"train\"][k][sample_idx]) for k in problem.conditions_keys}\n", + "\n", + "design = None\n", + "config = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if design is None or config is None:\n", + " raise RuntimeError(\"Uncomment / fill in `design` and `config` above.\")\n", + "\n", + "print(\"design shape:\", np.array(design).shape)\n", + "print(\"config: \", config)\n", + "\n", + "# CHECKPOINT\n", + "assert tuple(np.array(design).shape) == tuple(problem.design_space.shape), (\n", + " f\"design shape mismatch: expected {problem.design_space.shape}, got {np.array(design).shape}\"\n", + ")\n", + "missing = [k for k in problem.conditions_keys if k not in config]\n", + "assert not missing, f\"config missing condition keys: {missing}\"\n", + "print(\"\\u2705 Checkpoint passed — dataset sample loaded correctly.\")" + ] + }, + 
{ + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 5 — Design gallery\n", + "\n", + "Eight random training designs with their conditions. Notice how different conditions produce very different structures." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_design_gallery(dataset, problem, n=8, seed=SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 6 — Explore the dataset interactively\n", + "\n", + "**Drag the sliders** to filter designs by condition range. This is the dataset your generative model will learn from." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "interactive_condition_explorer(dataset, problem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 7 — Render a single design\n", + "\n", + "EngiBench problems have a built-in `render()` method that draws the design with physics-aware styling." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(design)\n", + "fig.suptitle(f\"Design (sample {sample_idx})\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 8 — Break constraints deliberately\n", + "\n", + "### The EngiBench constraint API\n", + "\n", + "Every EngiBench problem declares **design constraints** — rules a design must satisfy\n", + "to be physically valid. 
The `@constraint` decorator wraps a function that `assert`s\n", + "what must be true; `check_constraints()` catches failures and returns a `Violations`\n", + "object.\n", + "\n", + "Each constraint is tagged with a **category** that tells you *why* it exists:\n", + "\n", + "| Category | Import | Meaning |\n", + "|---|---|---|\n", + "| **`THEORY`** | `from engibench.constraint import THEORY` | The constraint comes from **physics**. Values outside the domain are unphysical (e.g. negative volume fraction) but may not crash the solver. |\n", + "| **`IMPL`** | `from engibench.constraint import IMPL` | The constraint guards the **implementation**. Violating it causes runtime errors or undefined behavior in the solver (e.g. mesh resolution too small). |\n", + "\n", + "Constraints also have a **criticality** level:\n", + "- `Criticality.Error` — hard violation, design is infeasible\n", + "- `Criticality.Warning` — soft violation, solver may still run but results are suspect\n", + "\n", + "The `Violations` object returned by `check_constraints()` supports filtering:\n", + "```python\n", + "violations.by_category(THEORY) # only physics violations\n", + "violations.by_category(IMPL) # only implementation violations\n", + "violations.by_criticality(Criticality.Warning) # only warnings\n", + "```\n", + "\n", + "For example, in `beams2d` the volume fraction has a `THEORY` constraint (physically,\n", + "volfrac must be in [0, 1]) **and** a stricter `IMPL` warning (the solver works best\n", + "with volfrac in [0.1, 0.9]).\n", + "\n", + "---\n", + "\n", + "### Exercise: force a constraint violation\n", + "\n", + "A design is only valid if it **satisfies all constraints for its operating conditions**. 
Let's see what happens when we lie about the conditions.\n", + "\n", + "**Your task:** copy the valid `config`, change one **scalar** condition to an extreme value, and call `problem.check_constraints(design=design, config=bad_config)`.\n", + "\n", + "The function returns a `Violations` object — `len(violations) == 0` means no violations." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# PUBLIC FILL-IN CELL 00-C\n", + "# Goal: force a constraint violation and inspect the result.\n", + "\n", + "# Find a scalar condition to perturb\n", + "scalar_keys = [k for k in problem.conditions_keys if np.asarray(config[k]).ndim == 0]\n", + "perturb_key = scalar_keys[0]\n", + "original_val = float(config[perturb_key])\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1) Make a copy of the valid config and change perturb_key to something extreme:\n", + "# bad_config = dict(config)\n", + "# bad_config[perturb_key] = original_val * 10\n", + "#\n", + "# 2) Check constraints with the mismatched config:\n", + "# violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "bad_config = None\n", + "violations = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if violations is None:\n", + " raise RuntimeError(\"Uncomment / fill in `bad_config` and `violations` above.\")\n", + "\n", + "print(f\"Perturbing '{perturb_key}': {original_val} \\u2192 {bad_config[perturb_key]}\")\n", + "violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "print(f\"\\n--- All constraint checks for {PROBLEM_ID} ({violations.n_constraints} total) ---\")\n", + "print(f\"Violations triggered: {len(violations)}\\n\")\n", + "if violations:\n", + " print(violations)\n", + "else:\n", + " print(\"No violations. 
Try a more extreme value.\")\n", + "\n", + "# CHECKPOINT\n", + "assert hasattr(violations, \"__len__\"), \"violations should be a Violations object\"\n", + "print(\"\\n\\u2705 Checkpoint passed — constraint checking explored.\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 9 — Simulate and optimise\n", + "\n", + "Beyond constraint checking, EngiBench problems expose **`simulate()`** and **`optimize()`** methods — the same solvers used to generate the dataset.\n", + "\n", + "| Method | What it does | Returns |\n", + "|---|---|---|\n", + "| `problem.simulate(design, config)` | Evaluate objective(s) for a given design | `np.ndarray` of objective values |\n", + "| `problem.optimize(starting_point, config)` | Run the full optimiser from a starting design | `(optimised_design, optimisation_history)` |\n", + "\n", + "> **Colab note:** some problems (e.g. `heatconduction2d`, `heatconduction3d`, `airfoil`) require a **Docker container** for their solver and will not run on Colab. Problems like `beams2d` and `thermoelastic2d` use pure-Python solvers and work everywhere." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Simulate: evaluate the objective for an existing design\n", + "obj_values = problem.simulate(design, config)\n", + "print(f\"Objective values for sample {sample_idx}: {obj_values}\")\n", + "print(f\"Objectives defined: {problem.objectives}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Optimize: run the solver from a uniform starting point\n", + "starting_point = np.full(problem.design_space.shape, float(config[\"volfrac\"]))\n", + "optimised_design, history = problem.optimize(starting_point, config)\n", + "\n", + "print(f\"Optimisation ran for {len(history)} steps\")\n", + "print(f\"Final objective: {history[-1].obj_values}\")\n", + "\n", + "# Compare: generated design vs. dataset design\n", + "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n", + "axes[0].imshow(design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[0].set_title(f\"Dataset design (obj={obj_values[0]:.4f})\")\n", + "axes[0].axis(\"off\")\n", + "axes[1].imshow(optimised_design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[1].set_title(f\"Re-optimised (obj={history[-1].obj_values[0]:.4f})\")\n", + "axes[1].axis(\"off\")\n", + "plt.tight_layout()\n", + "plt.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection\n", + "\n", + "Before moving on, think about:\n", + "\n", + "1. **The API contract** — what is *fixed* by the benchmark (design space, conditions, objectives) vs. what is *yours* to choose (model, hyperparameters, training strategy)?\n", + "2. **Constraints as a test** — why is it important that `check_constraints` exists as a separate function, rather than just training on feasible data?\n", + "3. **Simulate vs. optimise** — `simulate` is cheap (one forward pass), `optimize` is expensive (iterative solver). 
How might you use each when evaluating a generative model?\n", + "4. **What surprised you?** — anything about the design shapes, condition ranges, or dataset size that you did not expect?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Next\n", + "\n", + "Proceed to **Notebook 01** where you will train a generative model on this exact benchmark and produce new designs. The API you just learned carries over unchanged." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/participant/01_train_generate.ipynb b/workshops/dcc26/participant/01_train_generate.ipynb new file mode 100644 index 0000000..68b7fe1 --- /dev/null +++ b/workshops/dcc26/participant/01_train_generate.ipynb @@ -0,0 +1,1436 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 01: Train a Generative Model for Inverse Design\n\n**Can we learn to skip the optimizer?**\n\nIn Notebook 00 you saw that EngiBench bundles an optimizer with every problem.\nRunning that optimizer produces an optimal design — but it takes time. 
For\nBeams2D it runs in seconds, but for complex 3D problems it can take minutes or\nhours *per design*.\n\nGenerative AI offers a different approach: **train a neural network once on a\ndataset of optimal designs, then generate new designs instantly.** The trade-off\nis quality for speed — and the central question of this workshop is *how do we\nmeasure that trade-off rigorously?*\n\n### What you will do\n\n| Step | What happens | Key concept |\n|------|-------------|-------------|\n| **Prepare data** | Extract conditions and designs from EngiBench | The standardised data API |\n| **Train a model** | Fit a neural network to map conditions → designs | Supervised learning on design data |\n| **Generate designs** | Produce new designs for unseen conditions | Instant inference vs. slow optimization |\n| **Inspect results** | Compare generated vs. ground-truth designs visually | Setting up evaluation (Notebook 02) |\n\n> **Heads up:** We deliberately train a simple model with limited data and few\n> epochs. The results will be imperfect — **that is the point.** Understanding\n> *why* they are imperfect motivates the rigorous benchmarking we explore in\n> Notebook 02 and the discussion session.\n\n---\n\n### Exercise legend\n| Marker | Meaning |\n|---|---|\n| `FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ], + "id": "cell-0" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Colab users:** click **File > Save a copy in Drive** before editing so your changes persist." + ], + "id": "cell-1" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. 
Install dependencies" + ], + "id": "cell-2" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab / local dependency bootstrap\nimport subprocess, sys\n\nIN_COLAB = \"google.colab\" in sys.modules\nFORCE_INSTALL = False # Set True to force install outside Colab\n\nif IN_COLAB or FORCE_INSTALL:\n def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n _pip([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n try:\n import torch\n except Exception:\n _pip([\"torch\", \"torchvision\"])\n print(\"Install complete.\")\nelse:\n print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ], + "id": "cell-3" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## The inverse design problem\n\nTraditional topology optimization works like this:\n\n```\nConditions (volfrac, loads, …) ──► [ Optimizer (iterative) ] ──► Optimal design\n ⏱ seconds to hours\n```\n\nA **learned generator** replaces the optimizer with a neural network:\n\n```\nConditions ─┐\n ├──► [ Neural network ] ──► Approximate design\nRandom noise ─┘ ⏱ milliseconds\n```\n\nThe noise input lets the model produce **diverse** designs for the same\nconditions — useful for exploring the design space. But the designs are only\n*approximate*: the network has to generalise from training examples rather than\nsolving the physics directly.\n\n**Key question:** How close can a learned generator get to the optimizer? That\nis what benchmarking measures." + ], + "id": "cell-4" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Imports" + ], + "id": "cell-5" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "j", + "s", + "o", + "n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "r", + "a", + "n", + "d", + "o", + "m", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "y", + "s", + ",", + " ", + "o", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "p", + "a", + "t", + "h", + "l", + "i", + "b", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "P", + "a", + "t", + "h", + "\n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "m", + "a", + "t", + "p", + "l", + "o", + "t", + "l", + "i", + "b", + ".", + "p", + "y", + "p", + "l", + "o", + "t", + " ", + "a", + "s", + " ", + "p", + "l", + "t", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "u", + "m", + "p", + "y", + " ", + "a", + "s", + " ", + "n", + "p", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "t", + "o", + "r", + "c", + "h", + " ", + "a", + "s", + " ", + "t", + "h", + "\n", + "\n", + "#", + " ", + "W", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + " ", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "(", + "v", + "i", + "s", + "u", + "a", + "l", + "i", + "z", + "a", + "t", + "i", + "o", + "n", + " ", + "+", + " ", + "t", + "r", + "a", + "i", + "n", + "i", + "n", + "g", + " ", + "u", + "t", + "i", + "l", + "i", + "t", + "i", + "e", + "s", + ")", + "\n", + "i", + "f", + " ", + "\"", + "g", + "o", + "o", + "g", + "l", + "e", + ".", + "c", + "o", + "l", + "a", + "b", + "\"", + " ", + "i", + "n", + " ", + "s", + "y", + "s", + ".", + "m", + "o", + "d", + "u", + "l", + "e", + "s", + ":", + "\n", + " ", + " ", + " ", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + 
"e", + "s", + "s", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "\"", + "/", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "_", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "o", + "s", + ".", + "m", + "a", + "k", + "e", + "d", + "i", + "r", + "s", + "(", + "_", + "u", + "t", + "i", + "l", + "s", + ",", + " ", + "e", + "x", + "i", + "s", + "t", + "_", + "o", + "k", + "=", + "T", + "r", + "u", + "e", + ")", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + " ", + "=", + " ", + "\"", + "c", + "o", + "d", + "e", + "x", + "/", + "d", + "c", + "c", + "2", + "6", + "-", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "-", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "a", + "s", + "e", + " ", + "=", + " ", + "f", + "\"", + "h", + "t", + "t", + "p", + "s", + ":", + "/", + "/", + "r", + "a", + "w", + ".", + "g", + "i", + "t", + "h", + "u", + "b", + "u", + "s", + "e", + "r", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + ".", + "c", + "o", + "m", + "/", + "I", + "D", + "E", + "A", + "L", + "L", + "a", + "b", + "/", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "/", + "{", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + "}", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "f", + "o", + "r", + " ", + "_", + "f", + " ", + "i", + "n", + " ", + "(", + "\"", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ".", + "p", + "y", + "\"", + ",", + " ", + "\"", + "_", + "_", + "i", + "n", + "i", + "t", + "_", + "_", + ".", + "p", + "y", + "\"", + ")", + ":", + "\n", + " ", + " ", + " 
", + " ", + " ", + " ", + " ", + " ", + "i", + "f", + " ", + "n", + "o", + "t", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "e", + "x", + "i", + "s", + "t", + "s", + "(", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ")", + ":", + "\n", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + "e", + "s", + "s", + ".", + "c", + "h", + "e", + "c", + "k", + "_", + "c", + "a", + "l", + "l", + "(", + "[", + "\"", + "w", + "g", + "e", + "t", + "\"", + ",", + " ", + "\"", + "-", + "q", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "b", + "a", + "s", + "e", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ",", + " ", + "\"", + "-", + "O", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + "]", + ")", + "\n", + "e", + "l", + "s", + "e", + ":", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "a", + "b", + "s", + "p", + "a", + "t", + "h", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "i", + "f", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "s", + "d", + "i", + "r", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "e", + "l", + "s", + "e", + " ", + "\"", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + "s", + "y", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "n", + "s", + "e", + "r", + "t", + "(", + "0", + ",", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + ")", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "o", + "t", + "e", + "b", + "o", 
+ "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "E", + "4", + "0", + "2", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + ".", + "r", + "e", + "l", + "o", + "a", + "d", + "(", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ")", + " ", + " ", + "#", + " ", + "a", + "l", + "w", + "a", + "y", + "s", + " ", + "p", + "i", + "c", + "k", + " ", + "u", + "p", + " ", + "l", + "a", + "t", + "e", + "s", + "t", + " ", + "e", + "d", + "i", + "t", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "*", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "F", + "4", + "0", + "1", + ",", + "F", + "4", + "0", + "3", + "\n", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "b", + "e", + "n", + "c", + "h", + ".", + "u", + "t", + "i", + "l", + "s", + ".", + "a", + "l", + "l", + "_", + "p", + "r", + "o", + "b", + "l", + "e", + "m", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "B", + "U", + "I", + "L", + "T", + "I", + "N", + "_", + "P", + "R", + "O", + "B", + "L", + "E", + "M", + "S", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "o", + "p", + "t", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r", + " ", + "a", + "s", + " ", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "C", + "N", + "N", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r" + ], + "id": "cell-6" + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## 2. Configuration\n\nAll tuneable knobs in one place. **Experiment with these** — especially\n`EPOCHS` and `N_TRAIN` — to see how they affect the generated designs." + ], + "id": "cell-7" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ---------- Reproducibility ----------\n", + "SEED = 7\n", + "\n", + "# ---------- Problem ----------\n", + "PROBLEM_ID = \"beams2d\" # Change to try a different EngiBench problem\n", + "\n", + "# ---------- Training ----------\n", + "EPOCHS = 15 # Short for workshop; try 50+ for better results\n", + "BATCH_SIZE = 64\n", + "LR = 2e-4 # Adam learning rate\n", + "LATENT_DIM = 32 # Size of random noise vector fed to generator\n", + "# ---------- Generation ----------\n", + "N_SAMPLES = 24 # Designs to generate for Notebook 02\n", + "\n", + "# ---------- Device ----------\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)\n", + "\n", + "if \"google.colab\" in sys.modules and not th.cuda.is_available():\n", + " print(\"\\n⚠️ WARNING: No GPU detected! 
Training will be very slow (~1 min/epoch).\")\n", + " print(\" Go to: Runtime → Change runtime type → T4 GPU → Save\")\n", + " print(\" Then re-run from the top.\\n\")\n", + "\n", + "# ---------- Artifact paths ----------\n", + "ARTIFACT_DIR = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "CKPT_PATH = ARTIFACT_DIR / \"engiopt_cgan2d_generator_supervised.pt\"\n", + "HISTORY_PATH = ARTIFACT_DIR / \"training_history.csv\"\n", + "TRAIN_CURVE_PATH = ARTIFACT_DIR / \"training_curve.png\"\n", + "\n", + "# ---------- Seed everything ----------\n", + "random.seed(SEED)\n", + "np.random.seed(SEED)\n", + "th.manual_seed(SEED)\n", + "if th.cuda.is_available():\n", + " th.cuda.manual_seed_all(SEED)\n", + "\n", + "print(\"Problem: \", PROBLEM_ID)\n", + "print(\"Artifact dir:\", ARTIFACT_DIR)" + ], + "id": "cell-8" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 3. Load the EngiBench problem\n\nSame API you used in Notebook 00 — every problem exposes `.dataset`,\n`.conditions_keys`, and `.design_space`." 
+ ], + "id": "cell-9" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\ntrain_ds = problem.dataset[\"train\"]\ntest_ds = problem.dataset[\"test\"]\n\ncondition_keys = problem.conditions_keys\ndesign_shape = problem.design_space.shape\nn_conds = len(condition_keys)\n\nprint(f\"Problem : {type(problem).__name__}\")\nprint(f\"Design shape : {design_shape}\")\nprint(f\"Condition keys : {condition_keys}\")\nprint(f\"Train examples : {len(train_ds)}\")\nprint(f\"Test examples : {len(test_ds)}\")" + ], + "id": "cell-10" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Quick look at a few training designs\nshow_design_gallery(problem.dataset, problem, n=4, seed=SEED)" + ], + "id": "cell-11" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 4. FILL-IN 01-A: Prepare training data\n\nThe EngiBench dataset stores conditions and designs as separate columns.\nTo train a neural network we need to extract them into numeric arrays:\n\n1. **Conditions**: a `(N, n_conds)` array of floats — one row per sample, one column per condition key\n2. **Designs**: a `(N, H, W)` array of pixel values\n\nWe use the **full training set** so the model sees as many examples as possible.\nWe also rescale designs from `[0, 1]` to `[-1, 1]` because the generator uses a\n`tanh` output layer (which naturally outputs that range)." + ], + "id": "cell-12" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-A\n# Goal: extract conditions and designs from the full EngiBench training set.\n\nrng = np.random.default_rng(SEED)\n\n# START FILL ---------------------------------------------------------------\n\n# 1. 
Stack all condition columns into one (N, n_conds) array\n# Hint: use np.stack with a list comprehension over condition_keys\n# Example: np.stack([np.array(train_ds[k]).astype(np.float32)\n# for k in condition_keys], axis=1)\nconds_np = None\n\n# 2. Extract the optimal designs\n# Hint: np.array(train_ds[\"optimal_design\"]).astype(np.float32)\ndesigns_np = None\n\n# 3. Rescale designs from [0, 1] to [-1, 1]\n# Hint: targets = designs * 2.0 - 1.0\ntargets_np = None\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nn_train = len(train_ds)\nassert conds_np is not None and designs_np is not None and targets_np is not None, (\n \"Fill in conds_np, designs_np, and targets_np above.\"\n)\nassert conds_np.shape == (n_train, n_conds), (\n f\"Expected conditions shape ({n_train}, {n_conds}), got {conds_np.shape}\"\n)\nassert targets_np.shape == (n_train, *design_shape), (\n f\"Expected targets shape ({n_train}, {', '.join(map(str, design_shape))}), got {targets_np.shape}\"\n)\nassert targets_np.min() >= -1.0 and targets_np.max() <= 1.0, (\n f\"Targets should be in [-1, 1], got [{targets_np.min():.2f}, {targets_np.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: {n_train} samples, conditions {conds_np.shape}, targets {targets_np.shape}\")" + ], + "id": "cell-13" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 5. The Generator model\n\nWe use a **convolutional conditional generator** (cDCGAN) from EngiOpt. 
Unlike a\nsimple fully-connected network that treats the design as a flat vector of pixels,\nthis model uses **transposed convolutions** that upsample a small feature map\ninto a full-resolution design image — preserving spatial structure at every step.\n\n```\nnoise (32, 1, 1) ──► ConvT ──┐\n ├─► concat (256, 7, 7)\nconditions (4, 1, 1) ► ConvT ┘ │\n ▼\n ConvT 7×7 → 13×13\n ConvT 13×13 → 25×25\n ConvT 25×25 → 50×50\n ConvT 50×50 → 100×100 → resize → design\n```\n\nThis **convolutional inductive bias** is why CNN generators produce much sharper\ndesigns than MLP generators: each layer reasons about local spatial\nneighbourhoods rather than treating every pixel independently." + ], + "id": "cell-14" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Wrap the CNN generator so it accepts flat (B, dim) inputs\nfrom notebook_helpers import WorkshopGenerator\n\ncnn_gen = EngiOptCNNGenerator(\n latent_dim=LATENT_DIM,\n n_conds=n_conds,\n design_shape=design_shape,\n)\nmodel = WorkshopGenerator(cnn_gen).to(DEVICE)\n\nn_params = sum(p.numel() for p in model.parameters())\nprint(f\"Generator created: {n_params:,} parameters\")\nprint(f\"Input: noise ({LATENT_DIM}) + conditions ({n_conds}) = {LATENT_DIM + n_conds}\")\nprint(f\"Output: {' x '.join(map(str, design_shape))} design image\")" + ], + "id": "cell-15" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 6. FILL-IN 01-B: Train the model\n\nTraining is **supervised**: for each sample, the model sees random noise +\nconditions and tries to reproduce the optimal design. The loss measures\npixel-by-pixel error (MSE).\n\nWe provide a `train_supervised_generator()` helper that handles the training\nloop. 
Your job: **call it with the right arguments and experiment with\nsettings.**\n\n> **Try it:** After training with the default 15 epochs, change `EPOCHS` to 20\n> or 50 in the config cell above, re-run from there, and see how the loss and\n> designs change." + ], + "id": "cell-16" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-B\n# Goal: train the generator. Experiment with EPOCHS and BATCH_SIZE.\n\n# Pick a few test conditions for snapshot visualization during training\nsnap_idx = rng.choice(len(test_ds), size=4, replace=False)\nsnap_conds = np.stack(\n [np.array(test_ds[k])[snap_idx].astype(np.float32) for k in condition_keys],\n axis=1,\n)\nsnap_baselines = np.array(test_ds[\"optimal_design\"])[snap_idx].astype(np.float32)\n\n# START FILL ---------------------------------------------------------------\n\n# Call train_supervised_generator() with appropriate arguments.\n# It returns a dict with keys \"losses\" and \"snapshots\".\n#\n# Signature:\n# train_supervised_generator(\n# model, conditions_array, targets_array,\n# latent_dim=..., epochs=..., batch_size=..., lr=..., device=...,\n# snapshot_conditions=..., snapshot_at_epochs=[...],\n# )\n#\n# Use the variables: model, conds_np, targets_np, LATENT_DIM, EPOCHS,\n# BATCH_SIZE, LR, DEVICE, snap_conds\n\ntrain_result = None # Replace with the function call\n\n# END FILL -----------------------------------------------------------------\n\nif train_result is None:\n raise RuntimeError(\"Call train_supervised_generator() above and assign to train_result.\")\n\ntrain_losses = train_result[\"losses\"]\nsnapshots = train_result[\"snapshots\"]\n\n# Save checkpoint\nth.save(model.state_dict(), CKPT_PATH)\n\n# CHECKPOINT\nassert len(train_losses) == EPOCHS, f\"Expected {EPOCHS} loss values, got {len(train_losses)}\"\nassert train_losses[-1] < train_losses[0], (\n \"Loss did not decrease — check your training 
arguments.\"\n)\nprint(f\"\\nCHECKPOINT passed: trained for {EPOCHS} epochs, final loss {train_losses[-1]:.6f}\")" + ], + "id": "cell-17" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training loss curve\n\nThe loss should decrease over epochs. A flat or increasing loss means something\nwent wrong. Note that even a decreasing loss does not guarantee good designs —\nMSE rewards blurry averages." + ], + "id": "cell-18" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save training history\nimport pandas as pd\npd.DataFrame({\"epoch\": range(1, len(train_losses) + 1), \"loss\": train_losses}).to_csv(\n HISTORY_PATH, index=False,\n)\n\nshow_training_curve(train_losses, save_path=str(TRAIN_CURVE_PATH))" + ], + "id": "cell-19" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How the generator learns\n\nBelow you can see what the generator produces at different points during\ntraining. Early outputs are random noise; later outputs start to resemble beam\nstructures. The ground-truth row shows what the model is trying to match." + ], + "id": "cell-20" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_progression(snapshots, baseline_designs=snap_baselines, n_show=4)" + ], + "id": "cell-21" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 7. FILL-IN 01-C: Generate designs from test conditions\n\nNow for the payoff: use your trained model to produce designs for **conditions\nit has never seen** (from the held-out test set).\n\nIf the model generalises, it should produce reasonable designs for new\nconditions without running the optimizer. The `generate_designs()` helper\nhandles the inference — you just need to:\n\n1. Pick test conditions from the EngiBench dataset\n2. Call the generator\n3. 
Also extract the ground-truth baselines for comparison" + ], + "id": "cell-22" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-C\n# Goal: generate N_SAMPLES designs conditioned on test-set conditions.\n\n# START FILL ---------------------------------------------------------------\n\n# 1. Sample N_SAMPLES indices from the test set\n# Example: test_idx = rng.choice(len(test_ds), size=N_SAMPLES, replace=False)\ntest_idx = None\n\n# 2. Extract test conditions as (N_SAMPLES, n_conds) array and baseline designs\n# Example:\n# test_conds_np = np.stack(\n# [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n# axis=1,\n# )\n# baseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\ntest_conds_np = None\nbaseline_designs = None\n\n# 3. Generate designs using generate_designs()\n# Example: gen_designs = generate_designs(model, test_conds_np, latent_dim=LATENT_DIM, device=DEVICE)\ngen_designs = None\n\n# 4. 
Build condition records (list of dicts) for JSON export\n# Example:\n# conditions_records = [\n# {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n# for i in range(N_SAMPLES)\n# ]\nconditions_records = None\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nfor name, val in [(\"test_idx\", test_idx), (\"test_conds_np\", test_conds_np),\n (\"baseline_designs\", baseline_designs), (\"gen_designs\", gen_designs),\n (\"conditions_records\", conditions_records)]:\n assert val is not None, f\"Fill in {name} above.\"\nassert gen_designs.shape == baseline_designs.shape, (\n f\"Shape mismatch: generated {gen_designs.shape} vs baseline {baseline_designs.shape}\"\n)\nassert len(conditions_records) == N_SAMPLES\nassert 0.0 <= gen_designs.min() and gen_designs.max() <= 1.0, (\n f\"Generated designs should be in [0, 1], got [{gen_designs.min():.2f}, {gen_designs.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: generated {N_SAMPLES} designs, shape {gen_designs.shape}\")" + ], + "id": "cell-23" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 8. Visual comparison: Generated vs Ground Truth\n\nEach column shows the same conditions. Top row = your model's output; bottom row\n= the optimizer's solution from the dataset.\n\n**What to look for:**\n- **Blurriness:** generated designs are often blurry because MSE loss averages\n over possible solutions\n- **Structure:** do the generated designs have recognisable beam topology (load\n paths, supports)?\n- **Condition sensitivity:** do different conditions produce visibly different\n designs, or does the model output the same thing regardless?" 
+ ], + "id": "cell-24" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_gen_vs_baseline(gen_designs, baseline_designs, conditions_records, condition_keys)" + ], + "id": "cell-25" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 9. Export artifacts for Notebook 02\n\nNotebook 02 needs three files to run its evaluation pipeline:\n- `generated_designs.npy` — your model's output\n- `baseline_designs.npy` — ground-truth designs from the dataset\n- `conditions.json` — the conditions used for generation" + ], + "id": "cell-26" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.save(ARTIFACT_DIR / \"generated_designs.npy\", gen_designs)\nnp.save(ARTIFACT_DIR / \"baseline_designs.npy\", baseline_designs)\nwith open(ARTIFACT_DIR / \"conditions.json\", \"w\") as f:\n json.dump(conditions_records, f, indent=2)\n\n# Verify\nrequired = [\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\"]\nmissing = [f for f in required if not (ARTIFACT_DIR / f).exists()]\nassert not missing, f\"Missing: {missing}\"\nprint(f\"Exported to {ARTIFACT_DIR}:\")\nfor f in required:\n print(f\" {f}\")" + ], + "id": "cell-27" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Discussion\n\n### What you have seen\n\nYou trained a neural network on a few hundred examples for a few epochs and used\nit to produce beam designs in milliseconds. The results are imperfect — and that\nis exactly the point.\n\n### Questions to think about\n\n1. **Why are the designs blurry?** MSE loss penalises pixel-wise error, which\n rewards the *average* of all plausible designs rather than any single sharp\n one. What alternative losses or model architectures might produce crisper\n output? (Think: adversarial loss, diffusion models, VAEs.)\n\n2. 
**Does the model respond to conditions?** Compare designs generated for very\n different volume fractions or load distributions. If they all look the same,\n the model may have learned the dataset mean rather than the\n condition → design relationship. What might help? (More training data? More\n epochs? A different architecture?)\n\n3. **From pixels to physics.** A design can *look* reasonable but fail under\n simulation — disconnected material, wrong volume fraction, stress\n concentrations. Notebook 02 will run the physics solver on your generated\n designs and quantify these failures.\n\n4. **The benchmarking motivation.** We do not know how bad these designs are\n until we *measure*. That is the role of a benchmark: providing standardised\n evaluation so we can compare methods, track progress, and avoid fooling\n ourselves with visual inspection alone.\n\n5. **What would you change?** If you had an hour instead of 30 minutes, what\n would you try? More data, more epochs, a different model, a different loss\n function? How would you decide whether it *actually* improved?" + ], + "id": "cell-28" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Next\n\nProceed to **Notebook 02** to evaluate your generated designs with physics-based\nsimulation and compute benchmark metrics. Your exported artifacts are the input." 
+ ], + "id": "cell-29" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/participant/02_evaluate_metrics.ipynb b/workshops/dcc26/participant/02_evaluate_metrics.ipynb new file mode 100644 index 0000000..9ce8cf8 --- /dev/null +++ b/workshops/dcc26/participant/02_evaluate_metrics.ipynb @@ -0,0 +1,1271 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 02 (Participant): Evaluate Your Generated Designs\n", + "\n", + "In Notebook 01 you trained a generative model and produced candidate beam designs.\n", + "Now comes the critical question: **are those designs actually any good?**\n", + "\n", + "In generative modeling for engineering, \"good\" is **not** a single number.\n", + "A design can look plausible yet fail simulation. It can perform well on one\n", + "objective yet violate a critical constraint. It can be high-quality but\n", + "identical to a training example -- memorised, not generalised.\n", + "\n", + "This notebook walks you through a **structured evaluation pipeline** that\n", + "diagnoses generative model quality from multiple complementary angles, each\n", + "revealing something the others miss." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A taxonomy of generative-model metrics\n", + "\n", + "```\n", + " How do we evaluate a generative model?\n", + " |\n", + " ┌───────────────┬───────────┼───────────────┬──────────────────┐\n", + " | | | | |\n", + " Visual Simulation Constraint Distributional Diversity &\n", + " Inspection Performance Satisfaction Similarity Coverage\n", + " | | | | |\n", + " \"Does it \"Does it \"Is it \"Does it \"Did we\n", + " look right?\" work?\" legal?\" match explore?\"\n", + " reality?\"\n", + " | | | | |\n", + " Residual Compliance Volfrac error MMD Pairwise L2\n", + " heatmaps histogram distribution (Gaussian DPP diversity\n", + " + scatter + feasibility kernel) NN novelty\n", + " + per-sample rate bars PCA embedding\n", + " gap bars\n", + "```\n", + "\n", + "No single metric tells the whole story. A model can ace one category and\n", + "fail another -- and *which failure matters most* depends on your application.\n", + "\n", + "| Category | Question | Beams2D metric | Why it matters |\n", + "|----------|----------|---------------|----------------|\n", + "| **Visual inspection** | Does it look like a real beam? | Residual heatmaps | Quick sanity check; catches gross failures |\n", + "| **Simulation performance** | Does the physics solver confirm it works? | Compliance gap vs baseline | The ground truth -- simulation is our oracle |\n", + "| **Constraint satisfaction** | Does it obey the engineering spec? | Volume fraction error | A stiff beam using too much material is invalid |\n", + "| **Distributional similarity** | Does the generator match the real data distribution? | MMD (Maximum Mean Discrepancy) | Detects mode collapse, unrealistic densities |\n", + "| **Diversity & coverage** | Did the model explore, or did it memorise? | Pairwise L2, DPP, NN novelty | A model outputting one beam 24 times is useless |\n", + "| **Optimization warmstarting** | Does it give the optimizer a head start? 
| IOG, COG, FOG | The ultimate downstream utility test |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The evaluation pipeline at a glance\n", + "\n", + "```\n", + "Generated Designs Baseline Designs Training Designs\n", + " | | |\n", + " v v v\n", + " [ Visual Inspection ] [ Reference set ]\n", + " | | |\n", + " v v |\n", + " [ Simulate ] [ Simulate ] |\n", + " | | |\n", + " v v |\n", + " Objectives Objectives |\n", + " \\ / |\n", + " \\ / |\n", + " v v |\n", + " Simulation Metrics |\n", + " (gap, improvement rate) |\n", + " | |\n", + " v v\n", + " Constraint Metrics Distributional Metrics\n", + " (volfrac error, feasibility) (MMD, pixel distributions)\n", + " | |\n", + " v v\n", + " Diversity Metrics <──────────────── PCA Embedding\n", + " (pairwise L2, DPP, NN novelty)\n", + " |\n", + " v\n", + " Optimization Warmstarting\n", + " (IOG, COG, FOG trajectories)\n", + " |\n", + " v\n", + " Summary Dashboard\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Notebook map\n", + "\n", + "| Part | What you do | Key output |\n", + "|------|-------------|------------|\n", + "| Setup | Install deps, load artifacts | `gen_designs`, `baseline_designs`, `conditions` |\n", + "| Part 1 | Visual inspection (the eye test) | Residual heatmaps |\n", + "| Part 2 (Fill-in 02-A) | Per-sample simulation | `results` DataFrame |\n", + "| Part 3 | Constraint satisfaction analysis | Volfrac scatter + error distribution |\n", + "| Part 4 (Fill-in 02-B) | Distributional similarity (MMD) | `mmd_value` |\n", + "| Part 5 | Diversity & coverage | Pairwise heatmap + PCA embedding |\n", + "| Part 6 | Optimization warmstarting (demo) | Trajectory plots with IOG/COG/FOG |\n", + "| Part 7 (Fill-in 02-C) | Comprehensive summary dashboard | `summary_df` |\n", + "\n", + "### Legend\n", + "- `PUBLIC FILL-IN CELL` -- you write code here.\n", + "- `CHECKPOINT` -- run this assertion block to verify before moving on.\n", + "- `# START FILL` / `# END FILL` -- your edits go between these markers." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 0: Install dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def pip_install(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " pip_install([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n", + " pip_install([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1: Load artifacts from Notebook 01\n", + "\n", + "We need three files that Notebook 01 exported:\n", + "- `generated_designs.npy` -- the designs your model produced\n", + "- `baseline_designs.npy` -- optimised reference designs from the dataset\n", + "- `conditions.json` -- the boundary-condition configs for each sample\n", + "\n", + "The next cell contains a recovery function that **automatically rebuilds** these\n", + "artifacts if they are missing (e.g., if you jumped straight to NB02). You do not\n", + "need to read or understand that function -- just run it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ── Artifact recovery (runs only if NB01 artifacts are missing) ──────────\n", + "# This cell auto-builds NB01 artifacts so NB02 works standalone.\n", + "# You do NOT need to read this code -- just run the cell.\n", + "\n", + "import importlib\n", + "import json, random, sys, os\n", + "from pathlib import Path\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch as th\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader, TensorDataset\n", + "\n", + "# Workshop helpers\n", + "if \"google.colab\" in sys.modules:\n", + " import subprocess as _sp\n", + " _utils = \"/content/workshop_utils\"\n", + " os.makedirs(_utils, exist_ok=True)\n", + " _branch = \"codex/dcc26-workshop-notebooks\"\n", + " _base = f\"https://raw.githubusercontent.com/IDEALLab/EngiOpt/{_branch}/workshops/dcc26/utils\"\n", + " for _f in (\"notebook_helpers.py\", \"__init__.py\"):\n", + " if not os.path.exists(f\"{_utils}/{_f}\"):\n", + " _sp.check_call([\"wget\", \"-q\", f\"{_base}/{_f}\", \"-O\", f\"{_utils}/{_f}\"])\n", + "else:\n", + " _utils = os.path.abspath(\"../utils\") if os.path.isdir(\"../utils\") else \"workshops/dcc26/utils\"\n", + "sys.path.insert(0, _utils)\n", + "import notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers) # always pick up latest edits\n", + "from notebook_helpers import * # noqa: F401,F403\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "PROBLEM_ID = \"beams2d\"\n", + "\n", + "try:\n", + " from engiopt.cgan_2d.cgan_2d import Generator as EngiOptCGAN2DGenerator\n", + "except ModuleNotFoundError as exc:\n", + " raise ModuleNotFoundError(\n", + " \"Could not import engiopt. 
Run the install cell first; on Colab, restart runtime after install.\"\n", + " ) from exc\n", + "\n", + "\n", + "def _resolve_artifact_dir(create=False):\n", + " p = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + " if create:\n", + " p.mkdir(parents=True, exist_ok=True)\n", + " return p\n", + "\n", + "\n", + "def _build_artifacts_locally(artifact_dir, seed=7, n_train=512, n_samples=24, epochs=8, batch_size=64, latent_dim=32):\n", + " \"\"\"Replicate the NB01 train+generate pipeline to produce evaluation artifacts.\"\"\"\n", + " print(\"Auto-building NB01 artifacts (this takes ~1 min)...\")\n", + " random.seed(seed); np.random.seed(seed); th.manual_seed(seed)\n", + " if th.cuda.is_available(): th.cuda.manual_seed_all(seed)\n", + " device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n", + " problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=seed)\n", + " train_ds, test_ds = problem.dataset[\"train\"], problem.dataset[\"test\"]\n", + " ckeys = problem.conditions_keys\n", + " rng = np.random.default_rng(seed)\n", + " idx = rng.choice(len(train_ds), size=min(n_train, len(train_ds)), replace=False)\n", + " conds = np.stack([np.array(train_ds[k])[idx].astype(np.float32) for k in ckeys], axis=1)\n", + " designs = np.array(train_ds[\"optimal_design\"])[idx].astype(np.float32)\n", + " targets = designs * 2.0 - 1.0\n", + " model = EngiOptCGAN2DGenerator(latent_dim=latent_dim, n_conds=conds.shape[1], design_shape=problem.design_space.shape).to(device)\n", + " opt = th.optim.Adam(model.parameters(), lr=1e-3)\n", + " crit = nn.MSELoss()\n", + " dl = DataLoader(TensorDataset(th.tensor(conds), th.tensor(targets)), batch_size=batch_size, shuffle=True)\n", + " losses = []\n", + " for ep in range(epochs):\n", + " model.train(); ep_loss = 0.0\n", + " for cb, tb in dl:\n", + " cb, tb = cb.to(device), tb.to(device)\n", + " pred = model(th.randn(cb.shape[0], latent_dim, device=device), cb)\n", + " loss = 
crit(pred, tb); opt.zero_grad(); loss.backward(); opt.step()\n", + " ep_loss += loss.item()\n", + " avg = ep_loss / len(dl); losses.append(avg)\n", + " print(f\" epoch {ep+1:02d}/{epochs} loss={avg:.4f}\")\n", + " sc = min(n_samples, len(test_ds))\n", + " sel = rng.choice(len(test_ds), size=sc, replace=False)\n", + " tc = np.stack([np.array(test_ds[k])[sel].astype(np.float32) for k in ckeys], axis=1)\n", + " bl = np.array(test_ds[\"optimal_design\"])[sel].astype(np.float32)\n", + " model.eval()\n", + " with th.no_grad():\n", + " out = model(th.randn(sc, latent_dim, device=device), th.tensor(tc, device=device))\n", + " gd = ((out.clamp(-1, 1) + 1) / 2).clamp(0, 1).cpu().numpy().astype(np.float32)\n", + " cond_recs = []\n", + " for i in range(sc):\n", + " rec = {}\n", + " for j, k in enumerate(ckeys):\n", + " rec[k] = bool(tc[i, j]) if k == \"overhang_constraint\" else float(tc[i, j])\n", + " cond_recs.append(rec)\n", + " artifact_dir.mkdir(parents=True, exist_ok=True)\n", + " np.save(artifact_dir / \"generated_designs.npy\", gd)\n", + " np.save(artifact_dir / \"baseline_designs.npy\", bl)\n", + " with open(artifact_dir / \"conditions.json\", \"w\") as f: json.dump(cond_recs, f, indent=2)\n", + " pd.DataFrame({\"epoch\": range(1, len(losses)+1), \"train_loss\": losses}).to_csv(artifact_dir / \"training_history.csv\", index=False)\n", + " th.save({\"model\": model.state_dict(), \"condition_keys\": ckeys, \"latent_dim\": latent_dim}, artifact_dir / \"engiopt_cgan2d_generator_supervised.pt\")\n", + " print(\"Artifacts ready at\", artifact_dir)\n", + "\n", + "\n", + "ARTIFACT_DIR = _resolve_artifact_dir(create=True)\n", + "_required = [ARTIFACT_DIR / f for f in (\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\")]\n", + "if not all(p.exists() for p in _required):\n", + " _build_artifacts_locally(ARTIFACT_DIR)\n", + "\n", + "print(\"Artifact directory:\", ARTIFACT_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, 
+ "outputs": [], + "source": [ + "# ── Load artifacts ───────────────────────────────────────────────────────\n", + "import json\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "from scipy.spatial.distance import cdist\n", + "\n", + "gen_designs = np.load(ARTIFACT_DIR / \"generated_designs.npy\")\n", + "baseline_designs = np.load(ARTIFACT_DIR / \"baseline_designs.npy\")\n", + "with open(ARTIFACT_DIR / \"conditions.json\") as f:\n", + " conditions = json.load(f)\n", + "\n", + "print(f\"Generated designs : {gen_designs.shape} (values in [{gen_designs.min():.2f}, {gen_designs.max():.2f}])\")\n", + "print(f\"Baseline designs : {baseline_designs.shape}\")\n", + "print(f\"Condition records : {len(conditions)}\")\n", + "print(f\"Condition keys : {list(conditions[0].keys())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load a reference subset of training designs for distributional + novelty metrics\n", + "problem_ref = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "train_designs_full = np.array(problem_ref.dataset[\"train\"][\"optimal_design\"]).astype(np.float32)\n", + "ref_idx = np.random.default_rng(7).choice(\n", + " len(train_designs_full), size=min(1024, len(train_designs_full)), replace=False\n", + ")\n", + "train_reference = train_designs_full[ref_idx]\n", + "print(f\"Training reference set: {train_reference.shape[0]} designs\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 1: Visual Inspection -- The Eye Test\n", + "\n", + "Before computing any metric, **look at the designs**. Visual inspection catches\n", + "gross failures immediately: is the model producing solid blocks? random noise?\n", + "something that looks vaguely beam-like?\n", + "\n", + "We show three views:\n", + "1. **Side-by-side gallery** -- generated vs optimised baseline\n", + "2. 
**Pixel residual heatmaps** -- where exactly do the designs differ?\n", + "\n", + "Visual inspection is *necessary* but **not sufficient**. A design can look\n", + "plausible yet perform terribly in simulation, or violate constraints that\n", + "are invisible to the eye. The rest of this notebook quantifies what your\n", + "eyes cannot." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_residual_heatmaps(gen_designs, baseline_designs, n_show=6)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Takeaway:** The residual heatmaps reveal where the generator struggles most.\n", + "Bright regions = large pixel error. Notice how errors tend to cluster at\n", + "structural boundaries and fine features -- exactly the details that matter\n", + "most for physical performance.\n", + "\n", + "But pixels alone don't tell us about *compliance*, *constraint violations*, or\n", + "*diversity*. We need simulation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 2: Simulation Performance -- \"Does it work?\"\n", + "\n", + "The **physics simulator** is our oracle. For Beams2D, it computes the\n", + "*compliance* of each design under the given boundary conditions:\n", + "- **Lower compliance = stiffer beam = better design**\n", + "\n", + "We simulate both the generated design and its corresponding baseline\n", + "(the optimised design from the dataset) under **identical conditions**.\n", + "The difference tells us how far the generator is from optimal.\n", + "\n", + "> **Analogy:** Imagine you asked an architecture student to sketch a bridge.\n", + "> Visual inspection tells you the sketch looks bridge-like. But only a\n", + "> structural engineer (our simulator) can tell you whether it would actually\n", + "> stand up." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "\n", + "# Feasibility tolerance: how close must volfrac be to the target?\n", + "VOLFRAC_TOL = 0.05\n", + "\n", + "# PUBLIC FILL-IN CELL 02-A\n", + "# Goal: build a list of dicts, one per sample, with objective + feasibility info.\n", + "#\n", + "# For each sample i, you have:\n", + "# g = gen_designs[i] -- generated design (2D numpy array)\n", + "# b = baseline_designs[i] -- baseline design (2D numpy array)\n", + "# cfg = conditions[i] -- dict with keys like 'volfrac', 'rmin', etc.\n", + "\n", + "rows = []\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "for i in range(len(gen_designs)):\n", + " g = gen_designs[i]\n", + " b = baseline_designs[i]\n", + " cfg = dict(conditions[i])\n", + "\n", + " # 1) Compute volume fractions (mean pixel value of each design)\n", + " g_vf = None # TODO: compute mean of g\n", + " b_vf = None # TODO: compute mean of b\n", + " target_vf = cfg[\"volfrac\"]\n", + "\n", + " # 2) Check feasibility: is |actual_vf - target_vf| <= VOLFRAC_TOL?\n", + " g_feasible = None # TODO: True/False\n", + " b_feasible = None # TODO: True/False\n", + "\n", + " # 3) Simulate both designs under identical conditions\n", + " # Hint: call problem.reset(seed=...) 
before each simulate for reproducibility\n",
+ " # Hint: problem.simulate(design, config=cfg) returns an array; take element [0]\n",
+ " problem.reset(seed=7 + i)\n",
+ " g_obj = None # TODO: simulate the generated design\n",
+ " problem.reset(seed=7 + i)\n",
+ " b_obj = None # TODO: simulate the baseline design\n",
+ "\n",
+ " # 4) Record everything\n",
+ " rows.append({\n",
+ " \"sample\": i,\n",
+ " \"gen_obj\": g_obj,\n",
+ " \"base_obj\": b_obj,\n",
+ " \"gen_minus_base\": None if g_obj is None or b_obj is None else g_obj - b_obj,\n",
+ " \"gen_volfrac\": g_vf,\n",
+ " \"target_volfrac\": target_vf,\n",
+ " \"gen_feasible\": g_feasible,\n",
+ " \"base_feasible\": b_feasible,\n",
+ " })\n",
+ "\n",
+ "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n",
+ "# END FILL -----------------------------------------------------------------\n",
+ "\n",
+ "results = pd.DataFrame(rows)\n",
+ "results.head()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# CHECKPOINT 02-A\n",
+ "expected_cols = {\"sample\", \"gen_obj\", \"base_obj\", \"gen_minus_base\", \"gen_volfrac\",\n",
+ " \"target_volfrac\", \"gen_feasible\", \"base_feasible\"}\n",
+ "missing_cols = expected_cols - set(results.columns)\n",
+ "assert not missing_cols, f\"Missing columns: {missing_cols}\"\n",
+ "assert len(results) == len(gen_designs), f\"Expected {len(gen_designs)} rows, got {len(results)}\"\n",
+ "assert results[\"gen_obj\"].notna().all(), \"gen_obj contains NaN -- did you forget to simulate?\"\n",
+ "assert results[\"gen_feasible\"].dtype == bool, \"gen_feasible should be boolean\"\n",
+ "print(f\"Checkpoint 02-A passed: {len(results)} samples evaluated.\")\n",
+ "print(f\" Feasible generated: {results['gen_feasible'].sum()}/{len(results)}\")\n",
+ "print(f\" Feasible baseline: {results['base_feasible'].sum()}/{len(results)}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Visualising simulation results\n",
+ "\n",
+ "Three complementary views:\n",
+ "1. **Histogram** -- overall distribution of objectives (generated vs baseline)\n",
+ "2. **Scatter plot** -- per-sample pairing (points below diagonal = generated is better)\n",
+ "3. **Residual bar chart** -- per-sample gap, signed (green = generated outperforms)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "show_objective_comparison(results)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "show_objective_residuals(results)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Reading the simulation results\n",
+ "\n",
+ "- **Histogram overlap**: If the blue (generated) and orange (baseline) distributions\n",
+ " overlap heavily, the generator is competitive. If blue is shifted right (higher\n",
+ " compliance), the generator produces weaker designs.\n",
+ "\n",
+ "- **Scatter diagonal**: Points *below* the diagonal line mean the generated design\n",
+ " outperformed the optimised baseline for that sample -- a strong result.\n",
+ "\n",
+ "- **Residual bars**: The bar chart makes the per-sample gap immediately visible.\n",
+ " Consistent green bars = the model is competitive. Large red bars = specific\n",
+ " failure modes worth investigating (check the design images for those samples)."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 3: Constraint Satisfaction -- \"Is it legal?\"\n", + "\n", + "A design that performs well but **violates constraints** is useless in practice.\n", + "For Beams2D, the key constraint is **volume fraction**: the design must use\n", + "a specific amount of material (neither too much nor too little).\n", + "\n", + "> **Analogy:** An architect who designs a beautiful building that exceeds the\n", + "> budget by 50% has not solved the problem -- they have created a new one.\n", + "\n", + "We already computed `gen_volfrac` and `target_volfrac` in the simulation loop.\n", + "Now let's visualise how well the generator satisfies this constraint." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_volfrac_analysis(results, volfrac_tol=VOLFRAC_TOL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_feasibility_bars(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the constraint results\n", + "\n", + "- **Left scatter**: Points near the diagonal are feasible; points far from it\n", + " are violating the volume fraction constraint. The green band shows the\n", + " tolerance window.\n", + "\n", + "- **Error histogram**: A narrow distribution centered at zero means the generator\n", + " has learned to control material usage. A wide or biased distribution suggests\n", + " the model ignores the volume fraction condition.\n", + "\n", + "- **Feasibility rate**: The bar chart gives the bottom line. If the baseline\n", + " achieves ~100% feasibility but the generator is at 50%, there is a clear\n", + " conditioning failure.\n", + "\n", + "**Why this matters beyond beams:** In real engineering, constraints can be\n", + "stress limits, manufacturing tolerances, thermal budgets, or regulatory\n", + "requirements. 
A generative model that ignores constraints generates\n", + "*interesting but unusable* designs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 4: Distributional Similarity -- \"Does it match reality?\"\n", + "\n", + "The previous metrics evaluated designs **individually** (per-sample objective,\n", + "per-sample feasibility). But we also need to ask: does the *distribution*\n", + "of generated designs match the distribution of the ground-truth optimal designs\n", + "**for the same conditions**?\n", + "\n", + "### What is MMD?\n", + "\n", + "**Maximum Mean Discrepancy (MMD)** is a kernel-based distance between two\n", + "distributions. Intuitively:\n", + "\n", + "1. Map each design into a high-dimensional feature space via a Gaussian kernel\n", + "2. Compare the *mean embeddings* of the two sets\n", + "3. If the means match, the distributions are similar; if they diverge, they are different\n", + "\n", + "$$\\text{MMD}^2 = \\underbrace{\\mathbb{E}[k(x, x')]}_{\\text{gen-gen similarity}} + \\underbrace{\\mathbb{E}[k(y, y')]}_{\\text{base-base similarity}} - 2\\,\\underbrace{\\mathbb{E}[k(x, y)]}_{\\text{cross similarity}}$$\n", + "\n", + "- **MMD = 0**: generated and baseline distributions are identical\n", + "- **MMD > 0**: they differ (larger = more different)\n", + "- The kernel bandwidth $\\sigma$ controls the scale of comparison\n", + "\n", + "### Why compare generated vs baseline?\n", + "\n", + "Our generator is **conditional** -- it takes test conditions and produces\n", + "designs. The baseline contains the ground-truth optima for those *same*\n", + "test conditions. Comparing generated vs baseline directly measures whether\n", + "the generator has learned to produce the right designs for the right conditions.\n", + "\n", + "### Choosing sigma without test-data leakage\n", + "\n", + "The Gaussian kernel bandwidth $\\sigma$ determines what scale of difference\n", + "the kernel is sensitive to. 
We set it using the **median heuristic** on the\n", + "*training data only* -- the median pairwise distance among training designs.\n", + "This avoids leaking test information into the metric while ensuring the\n", + "kernel operates in a meaningful range.\n", + "\n", + "### Why MMD and not just \"average quality\"?\n", + "\n", + "A model could produce 24 copies of the single best design. Per-sample metrics\n", + "would look great! But the *distribution* would be nothing like the diverse\n", + "baseline set. MMD catches this." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visual intuition: where does each set place material?\n", + "show_spatial_distribution_comparison(gen_designs, baseline_designs, train_reference)\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-B\n", + "# Goal: compute MMD between generated designs and baseline designs (same conditions).\n", + "#\n", + "# MMD uses a Gaussian (RBF) kernel: k(x,y) = exp(-||x-y||^2 / (2*sigma^2))\n", + "#\n", + "# We set sigma from the TRAINING data (median heuristic) to avoid test-data leakage,\n", + "# then apply that fixed sigma to the gen-vs-baseline comparison.\n", + "#\n", + "# You have:\n", + "# gen_designs -- (N, H, W) numpy array of generated designs\n", + "# baseline_designs -- (N, H, W) numpy array of optimized designs (same conditions)\n", + "# train_reference -- (M, H, W) numpy array of training designs\n", + "# cdist -- from scipy.spatial.distance (already imported)\n", + "#\n", + "# Steps:\n", + "# 1. Flatten all design sets to 2D\n", + "# 2. Compute sigma from training pairwise distances (median heuristic)\n", + "# 3. Compute pairwise squared distances between generated and baseline\n", + "# 4. Apply Gaussian kernel: K = exp(-D / (2 * sigma^2))\n", + "# 5. 
MMD = mean(K_gg) + mean(K_bb) - 2 * mean(K_gb)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1. Flatten\n", + "gen_flat = None # TODO: reshape gen_designs to (N, H*W)\n", + "base_flat = None # TODO: reshape baseline_designs to (N, H*W)\n", + "ref_flat = None # TODO: reshape train_reference to (M, H*W)\n", + "\n", + "# 2. Sigma from training data only (no test leakage)\n", + "# Hint: compute pairwise sqeuclidean distances within ref_flat,\n", + "# then sigma = sqrt(median of those distances)\n", + "D_ref = None # TODO: cdist(ref_flat, ref_flat, \"sqeuclidean\")\n", + "sigma = None # TODO: float(np.sqrt(np.median(D_ref)))\n", + "\n", + "# 3. Pairwise squared distances for gen vs baseline\n", + "D_gg = None # TODO: cdist(gen_flat, gen_flat, \"sqeuclidean\")\n", + "D_bb = None # TODO: cdist(base_flat, base_flat, \"sqeuclidean\")\n", + "D_gb = None # TODO: cdist(gen_flat, base_flat, \"sqeuclidean\")\n", + "\n", + "# 4. Gaussian kernel\n", + "K_gg = None # TODO: np.exp(-D_gg / (2 * sigma**2))\n", + "K_bb = None # TODO: same for D_bb\n", + "K_gb = None # TODO: same for D_gb\n", + "\n", + "# 5. 
MMD\n", + "mmd_value = None # TODO: float(K_gg.mean() + K_bb.mean() - 2 * K_gb.mean())\n", + "\n", + "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"Sigma (median heuristic on training data): {sigma:.2f}\")\n", + "print(f\"MMD(generated, baseline) = {mmd_value:.6f}\")\n", + "print(f\" K_gg mean (gen-gen similarity): {K_gg.mean():.6f}\")\n", + "print(f\" K_bb mean (base-base similarity): {K_bb.mean():.6f}\")\n", + "print(f\" K_gb mean (cross similarity): {K_gb.mean():.6f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-B\n", + "assert mmd_value is not None, \"mmd_value is None -- did you compute it?\"\n", + "assert isinstance(mmd_value, float), \"mmd_value should be a float\"\n", + "assert mmd_value >= 0, f\"MMD should be non-negative, got {mmd_value}\"\n", + "assert sigma is not None and sigma > 1, f\"sigma should be > 1 for 10k-dim data (got {sigma}); did you use the median heuristic?\"\n", + "assert K_gg is not None and K_gb is not None, \"Kernel matrices not computed\"\n", + "print(f\"Checkpoint 02-B passed: MMD = {mmd_value:.6f} (sigma = {sigma:.2f})\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Put MMD in context: compare against reference points (same training-derived sigma)\n", + "#\n", + "# 1. Training sample vs baseline: what you'd get by grabbing random training\n", + "# designs instead of conditioning on the test conditions.\n", + "# 2. 
Random noise vs baseline: worst case (meaningless generator).\n", + "\n", + "rng_mmd = np.random.default_rng(42)\n", + "\n", + "# Training sample vs baseline (no conditioning)\n", + "train_sample_idx = rng_mmd.choice(len(train_reference), size=len(baseline_designs), replace=False)\n", + "train_sample_flat = train_reference[train_sample_idx].reshape(len(baseline_designs), -1)\n", + "D_tt = cdist(train_sample_flat, train_sample_flat, \"sqeuclidean\")\n", + "D_tb = cdist(train_sample_flat, base_flat, \"sqeuclidean\")\n", + "mmd_train_base = float(\n", + " np.exp(-D_tt / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_tb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "# Random noise vs baseline\n", + "random_designs = rng_mmd.random(gen_designs.shape).astype(np.float32)\n", + "rand_flat = random_designs.reshape(random_designs.shape[0], -1)\n", + "D_rr = cdist(rand_flat, rand_flat, \"sqeuclidean\")\n", + "D_rb = cdist(rand_flat, base_flat, \"sqeuclidean\")\n", + "mmd_random_base = float(\n", + " np.exp(-D_rr / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_rb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "print(f\"MMD reference points (sigma={sigma:.2f}, from training data):\")\n", + "print(f\" Generated vs Baseline: {mmd_value:.6f} (our model)\")\n", + "print(f\" Train sample vs Baseline: {mmd_train_base:.6f} (no conditioning)\")\n", + "print(f\" Random noise vs Baseline: {mmd_random_base:.6f} (worst case)\")\n", + "\n", + "show_mmd_comparison_bar(mmd_value, mmd_train_base, mmd_random_base)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PCA embedding: where do generated designs live relative to training data?\n", + "show_embedding_scatter(gen_designs, baseline_designs, train_reference)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the distributional similarity results\n", + "\n", + "- **Mean design images**: If the 
generated mean image looks similar to the\n", + " baseline mean image, the model has learned where material should go on\n", + " average for these conditions. Differences reveal spatial biases.\n", + "\n", + "- **Volume fraction distributions**: If the generated distribution is narrower\n", + " or shifted relative to baseline, the model isn't capturing the full range\n", + " of volume fractions needed for these test conditions.\n", + "\n", + "- **MMD in context**: The comparison bar chart places the generator's MMD on\n", + " a meaningful scale:\n", + " - **Train sample vs Baseline** (retrieval baseline): What you'd get by\n", + " grabbing random training designs instead of conditioning. If the\n", + " generator beats this, it has genuinely learned to condition.\n", + " - **Random vs Baseline** (worst case): Uniform noise -- the floor for\n", + " a non-functional generator.\n", + " A generator close to zero has matched the baseline distribution. A\n", + " generator near the train-sample bar is no better than memorising\n", + " training data without using the conditions.\n", + "\n", + "- **PCA embedding**: If generated designs (blue) cluster tightly in one\n", + " corner while training data (grey) spans a wide region, the model has\n", + " **mode collapse**. Ideally, blue points should overlap with the orange\n", + " baseline points (same conditions) while spanning a similar spread." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 5: Diversity & Coverage -- \"Did we explore?\"\n", + "\n", + "A generative model should produce **varied** designs, not 24 copies of the same\n", + "beam. We measure two complementary aspects:\n", + "\n", + "### Diversity (intra-set variation)\n", + "How different are the generated designs from *each other*?\n", + "- **Pairwise L2 distance**: Average Euclidean distance between all pairs of\n", + " generated designs. 
Higher = more diverse.\n", + "- **DPP diversity**: Determinantal Point Process log-determinant of the\n", + " similarity matrix. Captures both volume and spread of the set.\n", + "\n", + "### Novelty (distance to training data)\n", + "How different are the generated designs from the *training set*?\n", + "- **Nearest-neighbour distance**: For each generated design, find the closest\n", + " training example. If NN distance is near zero, the model may be memorising.\n", + " Higher = more novel.\n", + "\n", + "> **The diversity-quality trade-off:** A model that generates random noise\n", + "> would score very high on diversity but terribly on quality. We want designs\n", + "> that are diverse *and* feasible *and* performant. This is the fundamental\n", + "> tension in generative model evaluation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pairwise_distance_heatmap(gen_designs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Compute diversity and novelty metrics\n", + "diversity_l2 = mean_pairwise_l2(gen_designs)\n", + "novelty_nn = mean_nn_distance_to_reference(gen_designs, train_reference)\n", + "\n", + "# Also compute for baseline as a reference point\n", + "baseline_diversity = mean_pairwise_l2(baseline_designs)\n", + "baseline_novelty = mean_nn_distance_to_reference(baseline_designs, train_reference)\n", + "\n", + "print(\"Diversity (mean pairwise L2):\")\n", + "print(f\" Generated: {diversity_l2:.2f}\")\n", + "print(f\" Baseline: {baseline_diversity:.2f}\")\n", + "print()\n", + "print(\"Novelty (mean NN distance to training):\")\n", + "print(f\" Generated: {novelty_nn:.2f}\")\n", + "print(f\" Baseline: {baseline_novelty:.2f}\")\n", + "print()\n", + "if diversity_l2 < baseline_diversity * 0.5:\n", + " print(\"Warning: Generated diversity is much lower than baseline -- possible mode collapse.\")\n", + "elif diversity_l2 
> baseline_diversity * 1.5:\n", + " print(\"Note: Generated diversity exceeds baseline -- check if the extra variation is meaningful.\")\n", + "else:\n", + " print(\"Generated diversity is comparable to baseline diversity.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the diversity results\n", + "\n", + "- **Pairwise heatmap**: Uniform warm colours = good diversity (all designs differ\n", + " from each other). A block of cool/dark colours = a cluster of near-identical\n", + " designs (partial mode collapse).\n", + "\n", + "- **Diversity vs baseline**: The baseline designs come from an optimiser run on\n", + " diverse conditions, so they naturally vary. If the generator's diversity is\n", + " much lower, it is producing less variety than the problem demands.\n", + "\n", + "- **Novelty**: Very low NN distance means the generator is reproducing training\n", + " examples almost exactly. Some proximity is expected (it learned from them),\n", + " but near-zero distance suggests memorisation rather than generalisation." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 6: Optimization Warmstarting -- \"Does it speed up search?\"\n", + "\n", + "The **ultimate downstream test** for a generative model in engineering design:\n", + "if we use its output as a *starting point* for topology optimisation, does the\n", + "optimiser converge faster or find better solutions than starting from scratch?\n", + "\n", + "### The optimality gap metrics\n", + "\n", + "Starting from a generated design, we run the problem's optimiser and track the\n", + "objective at each step:\n", + "\n", + "- **IOG (Initial Optimality Gap)** = objective at step 0 minus baseline optimum.\n", + " *How good is the starting point?*\n", + "\n", + "- **FOG (Final Optimality Gap)** = objective at final step minus baseline optimum.\n", + " *How good is the final result?*\n", + "\n", + "- **COG (Cumulative Optimality Gap)** = sum of all per-step gaps.\n", + " *How much total \"wasted effort\" occurred across the trajectory?*\n", + " The shaded area in the trajectory plot.\n", + "\n", + "```\n", + "Objective\n", + " ^\n", + " | * IOG = obj[0] - baseline\n", + " | \\ *\n", + " | \\ * * Shaded area = COG\n", + " | \\ * * *\n", + " | ─ ─ ─ ─ ─ ─ ─ ─ FOG = obj[-1] - baseline\n", + " | - - - - - - - - - - - ← baseline (optimised reference)\n", + " └────────────────────────> Step\n", + "```\n", + "\n", + "- IOG < 0 is ideal: the generated design is *already better* than the baseline\n", + "- FOG ≈ 0: the optimiser recovers to baseline quality regardless of start\n", + "- Small COG: the optimiser converges quickly from this warmstart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DEMO: Optimization warmstarting on a small subset (3 samples)\n", + "# This runs the EngiBench optimiser from each generated design and tracks the trajectory.\n", + "# We use only 3 samples because optimization is slower than simulation.\n", + 
"\n", + "problem_opt = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "n_opt_demo = min(3, len(gen_designs))\n", + "opt_data = []\n", + "\n", + "for i in range(n_opt_demo):\n", + " cfg = dict(conditions[i])\n", + "\n", + " # Run optimiser from generated design\n", + " problem_opt.reset(seed=7 + i)\n", + " _, opt_history = problem_opt.optimize(gen_designs[i], config=cfg)\n", + "\n", + " # Get baseline objective for reference\n", + " problem_opt.reset(seed=7 + i)\n", + " base_obj = float(problem_opt.simulate(baseline_designs[i], config=cfg)[0])\n", + "\n", + " # Extract objective trajectory\n", + " obj_trajectory = [float(step.obj_values) for step in opt_history]\n", + "\n", + " opt_data.append({\n", + " \"sample_idx\": i,\n", + " \"obj_trajectory\": obj_trajectory,\n", + " \"base_obj\": base_obj,\n", + " })\n", + " iog = obj_trajectory[0] - base_obj\n", + " fog = obj_trajectory[-1] - base_obj\n", + " cog = sum(o - base_obj for o in obj_trajectory)\n", + " print(f\"Sample {i}: IOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f} ({len(opt_history)} steps)\")\n", + "\n", + "print(f\"\\nOptimization complete for {n_opt_demo} samples.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_optimization_trajectories(opt_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the optimization trajectories\n", + "\n", + "- **Steep drop at step 0→1**: The generated design was far from optimal, but the\n", + " optimiser quickly improved it. This still counts as a useful warmstart if\n", + " the total trajectory (COG) is shorter than starting from scratch.\n", + "\n", + "- **Flat trajectory near baseline**: The generated design was already near-optimal\n", + " and the optimiser had little work to do. 
Best-case scenario.\n", + "\n", + "- **Trajectory above baseline throughout**: The generated design was so far from\n", + " optimal that even after optimisation it never reached baseline quality. This\n", + " suggests the model is producing designs in the wrong region of design space.\n", + "\n", + "**In practice**, you would run this on many more samples and average the IOG/COG/FOG\n", + "to get statistically robust estimates. For the workshop, 3 samples illustrate\n", + "the concept." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 7: Putting It All Together\n", + "\n", + "Now we aggregate all the metrics from Parts 2-6 into a single summary table.\n", + "This is the kind of table you would report in a paper or use to compare\n", + "different generative models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-C\n", + "# Goal: build a comprehensive summary dict and wrap it in a DataFrame.\n", + "#\n", + "# You have:\n", + "# results -- per-sample DataFrame from Part 2\n", + "# mmd_value -- MMD from Part 4\n", + "# diversity_l2 -- from Part 5\n", + "# novelty_nn -- from Part 5\n", + "# opt_data -- optimization results from Part 6\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Compute average IOG/FOG/COG from the optimization demo\n", + "avg_iog = None # TODO: mean of (first obj - base_obj) across opt_data\n", + "avg_fog = None # TODO: mean of (last obj - base_obj) across opt_data\n", + "avg_cog = None # TODO: mean of (sum of gaps) across opt_data\n", + "\n", + "summary = {\n", + " # Simulation performance\n", + " \"n_samples\": len(results),\n", + " \"gen_obj_mean\": None, # TODO: mean of gen_obj column\n", + " \"base_obj_mean\": None, # TODO: mean of base_obj column\n", + " \"objective_gap_mean\": None, # TODO: mean of gen_minus_base column\n", + " \"improvement_rate\": None, 
# TODO: fraction where gen_obj < base_obj\n", + " # Constraint satisfaction\n", + " \"gen_feasible_rate\": None, # TODO: fraction of feasible generated designs\n", + " \"base_feasible_rate\": None, # TODO: fraction of feasible baseline designs\n", + " \"gen_violation_ratio\": None, # TODO: 1 - gen_feasible_rate\n", + " \"base_violation_ratio\": None, # TODO: 1 - base_feasible_rate\n", + " # Distributional similarity\n", + " \"mmd\": mmd_value,\n", + " # Diversity & novelty\n", + " \"gen_diversity_l2\": diversity_l2,\n", + " \"gen_novelty_to_train_l2\": novelty_nn,\n", + " # Optimization warmstarting (from demo subset)\n", + " \"avg_iog\": avg_iog,\n", + " \"avg_fog\": avg_fog,\n", + " \"avg_cog\": avg_cog,\n", + "}\n", + "\n", + "summary_df = None # TODO: pd.DataFrame([summary])\n", + "\n", + "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "# Show the summary transposed for readability (one metric per row)\n", + "display(summary_df.T.rename(columns={0: \"value\"}))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-C\n", + "assert \"summary_df\" in dir() and summary_df is not None, \"Define summary_df\"\n", + "assert len(summary_df) == 1, \"summary_df should have exactly one row\"\n", + "required_keys = {\n", + " \"n_samples\", \"gen_obj_mean\", \"base_obj_mean\", \"objective_gap_mean\",\n", + " \"improvement_rate\", \"gen_feasible_rate\", \"base_feasible_rate\",\n", + " \"gen_violation_ratio\", \"base_violation_ratio\",\n", + " \"mmd\", \"gen_diversity_l2\", \"gen_novelty_to_train_l2\",\n", + " \"avg_iog\", \"avg_fog\", \"avg_cog\",\n", + "}\n", + "missing = required_keys - set(summary_df.columns)\n", + "assert not missing, f\"Missing summary columns: {missing}\"\n", + "assert summary_df[\"gen_obj_mean\"].notna().all(), \"gen_obj_mean is NaN\"\n", + 
"print(\"Checkpoint 02-C passed: comprehensive summary table is complete.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_metric_summary_dashboard(summary)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Export artifacts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_path = ARTIFACT_DIR / \"per_sample_metrics.csv\"\n", + "summary_path = ARTIFACT_DIR / \"metrics_summary.csv\"\n", + "\n", + "results.to_csv(results_path, index=False)\n", + "summary_df.to_csv(summary_path, index=False)\n", + "\n", + "# Save objective histogram\n", + "hist_path = ARTIFACT_DIR / \"objective_histogram.png\"\n", + "fig, ax = plt.subplots(figsize=(7, 4))\n", + "ax.hist(results[\"gen_obj\"], bins=10, alpha=0.7, label=\"Generated\", color=\"#4C72B0\")\n", + "ax.hist(results[\"base_obj\"], bins=10, alpha=0.7, label=\"Baseline\", color=\"#DD8452\")\n", + "ax.set_xlabel(\"Compliance (lower is better)\")\n", + "ax.set_ylabel(\"Count\")\n", + "ax.set_title(\"Generated vs baseline objective distribution\")\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "fig.savefig(hist_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save scatter plot\n", + "scatter_path = ARTIFACT_DIR / \"objective_scatter.png\"\n", + "fig, ax = plt.subplots(figsize=(5, 5))\n", + "ax.scatter(results[\"base_obj\"], results[\"gen_obj\"], alpha=0.8)\n", + "lo = min(results[\"base_obj\"].min(), results[\"gen_obj\"].min()) * 0.9\n", + "hi = max(results[\"base_obj\"].max(), results[\"gen_obj\"].max()) * 1.1\n", + "ax.plot([lo, hi], [lo, hi], \"--\", color=\"gray\", linewidth=1)\n", + "ax.set_xlabel(\"Baseline compliance\")\n", + "ax.set_ylabel(\"Generated compliance\")\n", + "ax.set_title(\"Per-sample objective comparison\")\n", + "fig.tight_layout()\n", + "fig.savefig(scatter_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save 
design grid\n", + "grid_path = ARTIFACT_DIR / \"design_grid.png\"\n", + "fig, axes_grid = plt.subplots(2, min(6, len(gen_designs)), figsize=(14, 5))\n", + "for i in range(min(6, len(gen_designs))):\n", + " axes_grid[0, i].imshow(gen_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[0, i].set_title(f\"gen {i}\", fontsize=9)\n", + " axes_grid[0, i].axis(\"off\")\n", + " axes_grid[1, i].imshow(baseline_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[1, i].set_title(f\"base {i}\", fontsize=9)\n", + " axes_grid[1, i].axis(\"off\")\n", + "fig.tight_layout()\n", + "fig.savefig(grid_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "print(\"Exported:\")\n", + "for p in [results_path, summary_path, hist_path, scatter_path, grid_path]:\n", + " print(f\" {p}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Discussion prompts\n", + "\n", + "Use these questions to prepare for the workshop breakout discussion. There are no\n", + "\"right\" answers -- the goal is to develop your own informed perspective.\n", + "\n", + "1. **Which metric category matters most for your domain?** In safety-critical\n", + " applications (aerospace, medical devices), constraint satisfaction is a hard\n", + " requirement. In early-stage concept exploration, diversity might matter more.\n", + " What about your own research area?\n", + "\n", + "2. **When do metrics disagree?** A model might score well on MMD (distributional\n", + " match) but poorly on per-sample objective (simulation performance). What does\n", + " that disagreement tell you? Which metric would you trust more?\n", + "\n", + "3. **Is diversity always good?** A model that produces wildly different designs\n", + " scores high on diversity -- but some of those designs might be nonsensical.\n", + " When does high diversity indicate a problem rather than a strength?\n", + "\n", + "4. 
**The warmstarting test.** If a model's IOG is poor (bad starting points) but\n", + " FOG is near zero (optimiser recovers), is the model useful? What if IOG is\n", + " great but the optimiser diverges (FOG increases)?\n", + "\n", + "5. **When would you trust these results for a paper?** We evaluated 24 samples\n", + " with a model trained for 8 epochs on 512 examples. What would need to change\n", + " to make these numbers publication-ready? (Think: sample size, training budget,\n", + " statistical significance, multiple seeds.)\n", + "\n", + "6. **Objective vs feasibility trade-off.** If your model produces designs with\n", + " great compliance but poor volume-fraction adherence, is that progress or a\n", + " failure? How would you communicate this nuance in a benchmark table?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection: what did you learn in NB02?\n", + "\n", + "Before closing, write down your answers to these prompts:\n", + "\n", + "1. **What do the metrics tell you about your model?** Look at your summary table.\n", + " Where does the generator excel, and where does it fall short? Which metric\n", + " surprised you most?\n", + "\n", + "2. **Which visualisation was most informative?** Was it the residual heatmaps,\n", + " the PCA embedding, the optimization trajectories, or something else? Why?\n", + "\n", + "3. **What would a full benchmark study add?** A complete EngiBench evaluation\n", + " would test across multiple problems, multiple seeds, larger sample sizes, and\n", + " the full metric suite (MMD, DPP, IOG/COG/FOG, violation ratio). How would\n", + " that change your confidence in the conclusions?\n", + "\n", + "4. **How would you improve the generator?** Based on the diagnostic pattern you\n", + " see (which categories are strong vs weak), what would you change about the\n", + " model architecture, training procedure, or data pipeline?" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Troubleshooting\n", + "\n", + "If a section fails, do not continue downstream. Fix the failing cell first, then\n", + "rerun it and its checkpoint before moving on. The notebook is staged so that\n", + "failures are localised." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb b/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb new file mode 100644 index 0000000..8801e5c --- /dev/null +++ b/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb @@ -0,0 +1,785 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 03 (Participant): Add a New Problem to EngiBench\n", + "\n", + "**Time budget: ~25 minutes** | 3 fill-in exercises | Mostly guided walkthrough\n", + "\n", + "In this notebook you will see how to wrap a **new simulator** as an EngiBench `Problem`,\n", + "so that every model in EngiOpt can immediately train on it with zero code changes.\n", + "\n", + "We will build a **planar 2-link robot manipulator co-design problem**: choose link\n", + "lengths, motor strength, and control gains so the arm reaches a target with minimal\n", + "tracking error and energy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Notebook map\n", + "\n", + "This notebook is a **guided walkthrough** with 3 small fill-in exercises.\n", + "Most code is pre-written -- your job is to **read, run, and understand** the\n", + "EngiBench Problem contract, then fill in 3 targeted methods.\n", + "\n", + "### Public exercise legend\n", + "- `PUBLIC FILL-IN CELL`: implement this method (skeleton + hints provided).\n", + "- `CHECKPOINT`: run and verify before continuing.\n", + "- Pre-written cells: read and run -- these are fully working code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The problem: Planar manipulator co-design\n", + "\n", + "Imagine a simple robot arm bolted to a table. It has **two rigid links**\n", + "connected by revolute joints, and it needs to reach a target point in 2D space.\n", + "\n", + "```\n", + " target\n", + " X (target_x, target_y)\n", + " /\n", + " / link 2 (length l2)\n", + " /\n", + " joint 2\n", + " /\n", + " / link 1 (length l1)\n", + " /\n", + " joint 1\n", + " *------------ table / base\n", + "```\n", + "\n", + "**What we design** (the design vector, 6 variables):\n", + "\n", + "| Index | Variable | Range | Meaning |\n", + "|-------|----------|-------|---------|\n", + "| 0 | `link1_m` | 0.25 -- 1.00 | Length of link 1 (meters) |\n", + "| 1 | `link2_m` | 0.20 -- 0.95 | Length of link 2 (meters) |\n", + "| 2 | `motor_strength` | 2.0 -- 30.0 | Motor torque multiplier |\n", + "| 3 | `kp` | 5.0 -- 120.0 | Proportional control gain |\n", + "| 4 | `kd` | 0.2 -- 18.0 | Derivative control gain |\n", + "| 5 | `damping` | 0.0 -- 1.5 | Joint damping coefficient |\n", + "\n", + "**Conditions** (set by the environment, not the designer):\n", + "- `target_x`, `target_y`: where the arm must reach\n", + "- `payload_kg`: mass at the end-effector\n", + "- `disturbance_scale`: random torque noise during simulation\n", + "\n", + "**Objectives** (both minimized):\n", + "1. 
`final_tracking_error_m`: how far the end-effector is from the target at the end\n", + "2. `actuation_energy_j`: total energy spent by the motors\n", + "\n", + "**Why this is a co-design problem**: we are simultaneously choosing the *hardware*\n", + "(link lengths, motor) and the *controller* (gains, damping). This is exactly the\n", + "kind of coupled design problem where generative models can help explore the space." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The EngiBench Problem contract\n", + "\n", + "Every problem in EngiBench implements the same interface. This is what makes it\n", + "possible to train **any** EngiOpt model on **any** problem with zero model code changes.\n", + "\n", + "The key pieces:\n", + "\n", + "| Attribute / Method | Purpose |\n", + "|---|---|\n", + "| `design_space` | A `gymnasium.spaces.Box` defining valid designs |\n", + "| `objectives` | Tuple of `(name, direction)` pairs |\n", + "| `conditions` | Dataclass of environmental conditions |\n", + "| `design_constraints` | List of constraint functions |\n", + "| `check_constraints(design, config)` | Returns list of violations (empty = feasible) |\n", + "| `simulate(design, config)` | Runs the simulator, returns objective values |\n", + "| `optimize(start, config)` | Simple optimizer, returns `(best_design, history)` |\n", + "| `render(design)` | Visualization for human inspection |\n", + "| `random_design()` | Sample a random valid design |\n", + "\n", + "In this notebook, most of these are **pre-written**. You will fill in 3 methods\n", + "that test your understanding of the contract." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "\n", + "def pip_install(packages: list[str]):\n", + " cmd = [sys.executable, \"-m\", \"pip\", \"install\", *packages]\n", + " print(\"Running:\", \" \".join(cmd))\n", + " subprocess.check_call(cmd)\n", + "\n", + "\n", + "BASE_PACKAGES = [\"engibench[all]\", \"matplotlib\", \"gymnasium\", \"pybullet\"]\n", + "ENGIOPT_GIT = \"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " print(\"Installing dependencies...\")\n", + " pip_install(BASE_PACKAGES)\n", + " pip_install([ENGIOPT_GIT])\n", + "\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + "\n", + " print(\"Dependency install complete.\")\n", + "else:\n", + " print(\"Skipping install (using current environment). Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1 -- Imports\n", + "\n", + "These are the EngiBench building blocks we need to define a Problem." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Annotated\n", + "\n", + "import numpy as np\n", + "from gymnasium import spaces\n", + "\n", + "from engibench.constraint import bounded\n", + "from engibench.constraint import constraint\n", + "from engibench.core import ObjectiveDirection\n", + "from engibench.core import OptiStep\n", + "from engibench.core import Problem\n", + "\n", + "import pybullet as p" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2 -- Build the Problem class (guided walkthrough + 3 fill-ins)\n", + "\n", + "The cell below contains the **complete** `PlanarManipulatorCoDesignProblem` class.\n", + "Most methods are pre-written and working. **Three methods** are left for you to fill in.\n", + "\n", + "Read through the pre-written code to understand the structure, then complete:\n", + "\n", + "1. **Fill-in 03-A** (`simulate`): Merge config, clip design to bounds, call the rollout. A short wrapper method.\n", + "2. **Fill-in 03-B** (`random_design`): Sample a design from the design space. Essentially a one-liner.\n", + "3. **Fill-in 03-C** (`optimize`): Wire up a simple random-perturbation search loop using the hints provided.\n", + "\n", + "The pre-written methods handle all the PyBullet complexity -- you do NOT need to\n", + "understand robotics or physics simulation to complete the exercises." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pre-written methods tour (read before filling in)\n", + "\n", + "Here is a quick guide to the pre-written methods you will see in the class:\n", + "\n", + "- **`__init__`**: Sets up the design space (6-dim Box), conditions, and constraints.\n", + "- **`_build_robot`**: Creates a 2-link arm in PyBullet with configurable link lengths and damping.\n", + "- **`_inverse_kinematics_2link`**: Given a target (x, y), computes the joint angles using the law of cosines. Standard closed-form 2-link IK.\n", + "- **`_forward_kinematics_2link`**: Given joint angles, computes end-effector (x, y). Simple trig.\n", + "- **`_rollout`**: Runs the full PyBullet simulation -- sets up PD control to track the target, applies disturbances, records tracking error and energy at each step.\n", + "- **`optimize`** (NOT pre-written -- this is your fill-in 03-C): A random-perturbation search over the design space; you will wire up the perturb-and-keep-best loop yourself using the hints in the skeleton.\n", + "- **`render`**: 4-panel matplotlib figure showing design variables, end-effector path, tracking error, and joint torques." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class PlanarManipulatorCoDesignProblem(Problem[np.ndarray]):\n", + " \"\"\"Robotics co-design problem: choose arm geometry + controller to reach a target.\n", + "\n", + " This wraps a PyBullet physics simulation as an EngiBench Problem so that\n", + " any EngiOpt generative model can train on it.\n", + " \"\"\"\n", + "\n", + " version = 0\n", + " objectives = (\n", + " (\"final_tracking_error_m\", ObjectiveDirection.MINIMIZE),\n", + " (\"actuation_energy_j\", ObjectiveDirection.MINIMIZE),\n", + " )\n", + "\n", + " @dataclass\n", + " class Conditions:\n", + " target_x: Annotated[float, bounded(lower=0.20, upper=1.35)] = 0.85\n", + " target_y: Annotated[float, bounded(lower=0.05, upper=1.20)] = 0.45\n", + " payload_kg: Annotated[float, bounded(lower=0.0, upper=2.0)] = 0.8\n", + " disturbance_scale: Annotated[float, bounded(lower=0.0, upper=0.30)] = 0.05\n", + "\n", + " @dataclass\n", + " class Config(Conditions):\n", + " sim_steps: Annotated[int, bounded(lower=60, upper=1200)] = 240\n", + " dt: Annotated[float, bounded(lower=1e-4, upper=0.05)] = 1.0 / 120.0\n", + " torque_limit: Annotated[float, bounded(lower=1.0, upper=50.0)] = 12.0\n", + " max_iter: Annotated[int, bounded(lower=1, upper=300)] = 60\n", + "\n", + " dataset_id = \"IDEALLab/planar_manipulator_codesign_v0\" # placeholder\n", + " container_id = None\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # __init__ (pre-written)\n", + " # ------------------------------------------------------------------ #\n", + " def __init__(self, seed: int = 0, **kwargs):\n", + " super().__init__(seed=seed)\n", + " self.config = self.Config(**kwargs)\n", + " self.conditions = self.Conditions(\n", + " target_x=self.config.target_x,\n", + " target_y=self.config.target_y,\n", + " payload_kg=self.config.payload_kg,\n", + " 
disturbance_scale=self.config.disturbance_scale,\n", + " )\n", + "\n", + " # Design vector = [link1_m, link2_m, motor_strength, kp, kd, damping]\n", + " self.design_space = spaces.Box(\n", + " low=np.array([0.25, 0.20, 2.0, 5.0, 0.2, 0.0], dtype=np.float32),\n", + " high=np.array([1.00, 0.95, 30.0, 120.0, 18.0, 1.5], dtype=np.float32),\n", + " dtype=np.float32,\n", + " )\n", + "\n", + " # --- Constraints ------------------------------------------------\n", + " # These use the @constraint decorator from EngiBench.\n", + " # A constraint function receives (design, **config_kwargs).\n", + " # It should ASSERT what must be true. If the assert fails,\n", + " # check_constraints() catches it and reports a violation.\n", + "\n", + " @constraint\n", + " def reachable_workspace(design: np.ndarray, target_x: float, target_y: float, **_) -> None:\n", + " l1, l2 = float(design[0]), float(design[1])\n", + " r = float(np.sqrt(target_x**2 + target_y**2))\n", + " assert l1 + l2 >= r + 0.03, f\"target radius {r:.3f} exceeds reach {l1 + l2:.3f}\"\n", + "\n", + " @constraint\n", + " def gain_consistency(design: np.ndarray, **_) -> None:\n", + " kp, kd = float(design[3]), float(design[4])\n", + " assert kd <= 2.2 * np.sqrt(max(kp, 1e-6)), f\"kd={kd:.3f} too high for kp={kp:.3f}\"\n", + "\n", + " self.design_constraints = [reachable_workspace, gain_consistency]\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _build_robot (pre-written -- PyBullet internals)\n", + " # ------------------------------------------------------------------ #\n", + " def _build_robot(self, l1: float, l2: float, payload_kg: float, damping: float) -> tuple[int, int]:\n", + " \"\"\"Create a 2-link planar arm in PyBullet. 
Returns (robot_id, ee_link_index).\"\"\"\n", + " p.resetSimulation()\n", + " p.setGravity(0, 0, -9.81)\n", + "\n", + " link_masses = [0.5 + 0.2 * payload_kg, 0.35 + 0.25 * payload_kg]\n", + " link_collision = [-1, -1]\n", + " link_visual = [\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.025, length=l1, rgbaColor=[0.2, 0.5, 0.9, 1.0]),\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.020, length=l2, rgbaColor=[0.9, 0.4, 0.2, 1.0]),\n", + " ]\n", + " qx = p.getQuaternionFromEuler([0.0, np.pi / 2.0, 0.0])\n", + "\n", + " robot = p.createMultiBody(\n", + " baseMass=0.0,\n", + " baseCollisionShapeIndex=-1,\n", + " baseVisualShapeIndex=-1,\n", + " basePosition=[0, 0, 0],\n", + " linkMasses=link_masses,\n", + " linkCollisionShapeIndices=link_collision,\n", + " linkVisualShapeIndices=link_visual,\n", + " linkPositions=[[0, 0, 0], [l1, 0, 0]],\n", + " linkOrientations=[qx, qx],\n", + " linkInertialFramePositions=[[l1 / 2.0, 0, 0], [l2 / 2.0, 0, 0]],\n", + " linkInertialFrameOrientations=[[0, 0, 0, 1], [0, 0, 0, 1]],\n", + " linkParentIndices=[0, 1],\n", + " linkJointTypes=[p.JOINT_REVOLUTE, p.JOINT_REVOLUTE],\n", + " linkJointAxis=[[0, 0, 1], [0, 0, 1]],\n", + " )\n", + "\n", + " for j in [0, 1]:\n", + " p.changeDynamics(robot, j, linearDamping=0.0, angularDamping=float(damping))\n", + "\n", + " return robot, 1\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _inverse_kinematics_2link (pre-written -- standard 2-link IK)\n", + " # ------------------------------------------------------------------ #\n", + " def _inverse_kinematics_2link(self, x: float, y: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Closed-form IK for a 2-link planar arm using the law of cosines.\"\"\"\n", + " r2 = x * x + y * y\n", + " c2 = (r2 - l1 * l1 - l2 * l2) / (2.0 * l1 * l2)\n", + " c2 = float(np.clip(c2, -1.0, 1.0))\n", + " s2 = float(np.sqrt(max(0.0, 1.0 - c2 * c2)))\n", + " q2 = float(np.arctan2(s2, c2))\n", + " q1 = 
float(np.arctan2(y, x) - np.arctan2(l2 * s2, l1 + l2 * c2))\n", + " return q1, q2\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _forward_kinematics_2link (pre-written -- simple trig)\n", + " # ------------------------------------------------------------------ #\n", + " def _forward_kinematics_2link(self, q1: float, q2: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Compute end-effector (x, y) from joint angles and link lengths.\"\"\"\n", + " x = l1 * np.cos(q1) + l2 * np.cos(q1 + q2)\n", + " y = l1 * np.sin(q1) + l2 * np.sin(q1 + q2)\n", + " return float(x), float(y)\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _rollout (pre-written -- runs the full PyBullet simulation)\n", + " # ------------------------------------------------------------------ #\n", + " def _rollout(self, design: np.ndarray, cfg: dict, return_trace: bool = False):\n", + " \"\"\"Run PyBullet simulation with PD control. 
Returns objective vector.\"\"\"\n", + " l1, l2, motor_strength, kp, kd, damping = [float(v) for v in design]\n", + "\n", + " cid = p.connect(p.DIRECT)\n", + " try:\n", + " robot, _ = self._build_robot(l1, l2, cfg[\"payload_kg\"], damping)\n", + " q1_t, q2_t = self._inverse_kinematics_2link(cfg[\"target_x\"], cfg[\"target_y\"], l1, l2)\n", + "\n", + " err_trace = []\n", + " tau_trace = []\n", + " ee_trace = []\n", + " energy = 0.0\n", + "\n", + " for _step in range(int(cfg[\"sim_steps\"])):\n", + " for j, q_t in enumerate([q1_t, q2_t]):\n", + " p.setJointMotorControl2(\n", + " bodyUniqueId=robot,\n", + " jointIndex=j,\n", + " controlMode=p.POSITION_CONTROL,\n", + " targetPosition=q_t,\n", + " positionGain=float(kp) / 120.0,\n", + " velocityGain=float(kd) / 50.0,\n", + " force=float(cfg[\"torque_limit\"]) * float(motor_strength),\n", + " )\n", + "\n", + " if cfg[\"disturbance_scale\"] > 0:\n", + " disturb = self.np_random.normal(0.0, cfg[\"disturbance_scale\"], size=2)\n", + " p.applyExternalTorque(robot, 0, [0, 0, float(disturb[0])], p.LINK_FRAME)\n", + " p.applyExternalTorque(robot, 1, [0, 0, float(disturb[1])], p.LINK_FRAME)\n", + "\n", + " p.stepSimulation()\n", + "\n", + " js0 = p.getJointState(robot, 0)\n", + " js1 = p.getJointState(robot, 1)\n", + " q1, q2 = float(js0[0]), float(js1[0])\n", + " dq1, dq2 = float(js0[1]), float(js1[1])\n", + " tau1, tau2 = float(js0[3]), float(js1[3])\n", + "\n", + " ee_x, ee_y = self._forward_kinematics_2link(q1, q2, l1, l2)\n", + " err = float(np.sqrt((ee_x - cfg[\"target_x\"]) ** 2 + (ee_y - cfg[\"target_y\"]) ** 2))\n", + "\n", + " err_trace.append(err)\n", + " tau_trace.append((tau1, tau2))\n", + " ee_trace.append((ee_x, ee_y))\n", + " energy += (abs(tau1 * dq1) + abs(tau2 * dq2)) * float(cfg[\"dt\"])\n", + "\n", + " final_error = float(err_trace[-1])\n", + " obj = np.array([final_error, float(energy)], dtype=np.float32)\n", + "\n", + " if return_trace:\n", + " trace = {\n", + " \"ee_trace\": np.array(ee_trace, 
dtype=np.float32),\n", + " \"err_trace\": np.array(err_trace, dtype=np.float32),\n", + " \"tau_trace\": np.array(tau_trace, dtype=np.float32),\n", + " \"target\": np.array([cfg[\"target_x\"], cfg[\"target_y\"]], dtype=np.float32),\n", + " \"design\": np.array(design, dtype=np.float32),\n", + " \"objectives\": obj,\n", + " }\n", + " return obj, trace\n", + "\n", + " return obj\n", + " finally:\n", + " p.disconnect(cid)\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-A: simulate\n", + " # ================================================================== #\n", + " def simulate(self, design: np.ndarray, config: dict | None = None) -> np.ndarray:\n", + " \"\"\"Run the simulator and return objective values.\n", + "\n", + " This is the main entry point that EngiOpt models call.\n", + " It should:\n", + " 1. Merge self.config defaults with any overrides from `config`\n", + " 2. Clip the design to the valid bounds\n", + " 3. Call self._rollout() and return the result\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: self.config.__dict__ gives you the default config as a dict.\n", + " # Use {**defaults, **(config or {})} to merge.\n", + " # np.clip(design, self.design_space.low, self.design_space.high)\n", + " # Return self._rollout(clipped_design, merged_cfg, return_trace=False)\n", + " raise NotImplementedError(\"Fill in simulate()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-B: random_design\n", + " # ================================================================== #\n", + " # Note: check_constraints() is inherited from Problem. 
It calls each\n", + " # function in self.design_constraints and collects assertion failures.\n", + " # The constraints are defined in __init__ above -- look at them!\n", + " #\n", + " # This fill-in is about random_design(), which is used by the optimizer\n", + " # and by dataset generation to sample starting points.\n", + "\n", + " def random_design(self):\n", + " \"\"\"Return (design, reward) where design is sampled uniformly from bounds.\n", + "\n", + " Convention: reward = -1 (dummy value, since we have not simulated yet).\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: self.np_random.uniform(low, high) samples from the design space.\n", + " # self.design_space.low and .high give the bounds.\n", + " # Return (design_array, -1)\n", + " raise NotImplementedError(\"Fill in random_design()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-C: optimize\n", + " # ================================================================== #\n", + " def optimize(self, starting_point: np.ndarray, config: dict | None = None):\n", + " \"\"\"Simple random-perturbation optimizer.\n", + "\n", + " Returns (best_design, history) where history is a list of OptiStep.\n", + " Each OptiStep records the best objective values seen so far at that step.\n", + "\n", + " Algorithm:\n", + " 1. Start from starting_point, evaluate it\n", + " 2. For each iteration: perturb the best design with Gaussian noise,\n", + " clip to bounds, check constraints, simulate, keep if better\n", + " 3. 
\"Better\" = lower score, where score = error + 0.02 * energy\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: Follow the docstring algorithm above.\n", + " # cfg = {**self.config.__dict__, **(config or {})}\n", + " # x = np.clip(starting_point, self.design_space.low, self.design_space.high)\n", + " # best, best_obj = x.copy(), self.simulate(x, cfg)\n", + " # best_score = float(best_obj[0] + 0.02 * best_obj[1])\n", + " # history = [OptiStep(obj_values=best_obj, step=0)]\n", + " # step_scale = np.array([0.05, 0.05, 2.5, 8.0, 1.2, 0.08], dtype=np.float32)\n", + " #\n", + " # Loop max_iter times:\n", + " # candidate = best + self.np_random.normal(...) * step_scale\n", + " # clip, check constraints, simulate, compare scores\n", + " # Append OptiStep to history\n", + " #\n", + " # return best, history\n", + " raise NotImplementedError(\"Fill in optimize()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # render (pre-written -- 4-panel visualization)\n", + " # ------------------------------------------------------------------ #\n", + " def render(self, design: np.ndarray, *, open_window: bool = False):\n", + " \"\"\"Create a 4-panel diagnostic figure for a given design.\"\"\"\n", + " import matplotlib.pyplot as plt\n", + "\n", + " cfg = self.config.__dict__\n", + " x = np.clip(design.astype(np.float32), self.design_space.low, self.design_space.high)\n", + " obj, trace = self._rollout(x, cfg, return_trace=True)\n", + "\n", + " ee = trace[\"ee_trace\"]\n", + " err = trace[\"err_trace\"]\n", + " target = trace[\"target\"]\n", + " tau = trace[\"tau_trace\"]\n", + "\n", + " fig, axes = plt.subplots(1, 4, figsize=(17, 4.2))\n", + "\n", + " labels = [\"link1\", \"link2\", \"motor\", \"kp\", \"kd\", \"damping\"]\n", + " axes[0].bar(labels, x, color=[\"#4c78a8\", \"#4c78a8\", \"#f58518\", 
\"#54a24b\", \"#e45756\", \"#72b7b2\"])\n", + " axes[0].set_title(\"Design variables\")\n", + " axes[0].tick_params(axis=\"x\", rotation=35)\n", + "\n", + " axes[1].plot(ee[:, 0], ee[:, 1], lw=2, label=\"end-effector path\")\n", + " axes[1].scatter([target[0]], [target[1]], c=\"red\", marker=\"x\", s=70, label=\"target\")\n", + " r = x[0] + x[1]\n", + " circle = plt.Circle((0, 0), r, color=\"gray\", fill=False, linestyle=\"--\", alpha=0.5)\n", + " axes[1].add_patch(circle)\n", + " axes[1].set_aspect(\"equal\", \"box\")\n", + " axes[1].set_title(\"Task-space trajectory\")\n", + " axes[1].set_xlabel(\"x [m]\")\n", + " axes[1].set_ylabel(\"y [m]\")\n", + " axes[1].legend(fontsize=8)\n", + "\n", + " axes[2].plot(err, color=\"#e45756\")\n", + " axes[2].set_title(\"Tracking error over time\")\n", + " axes[2].set_xlabel(\"step\")\n", + " axes[2].set_ylabel(\"error [m]\")\n", + " axes[2].grid(alpha=0.3)\n", + "\n", + " axes[3].plot(np.abs(tau[:, 0]), label=\"|tau1|\")\n", + " axes[3].plot(np.abs(tau[:, 1]), label=\"|tau2|\")\n", + " axes[3].set_title(\"Actuation effort\")\n", + " axes[3].set_xlabel(\"step\")\n", + " axes[3].set_ylabel(\"torque [Nm]\")\n", + " axes[3].legend(fontsize=8)\n", + " axes[3].grid(alpha=0.3)\n", + "\n", + " fig.suptitle(\n", + " f\"Objectives: final_error={obj[0]:.4f} m, energy={obj[1]:.3f} J\",\n", + " y=1.03,\n", + " )\n", + " fig.tight_layout()\n", + "\n", + " if open_window:\n", + " plt.show()\n", + " return fig, axes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CHECKPOINT: Quick sanity check before the smoke test\n", + "\n", + "Run this cell to verify the class can be instantiated and the pre-written\n", + "parts work. This does NOT require your fill-ins yet." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT -- class instantiation (does not call your fill-ins)\n", + "prob_test = PlanarManipulatorCoDesignProblem(seed=0)\n", + "print(\"design_space:\", prob_test.design_space)\n", + "print(\"objectives:\", prob_test.objectives)\n", + "print(\"num constraints:\", len(prob_test.design_constraints))\n", + "print(\"conditions:\", prob_test.conditions)\n", + "print()\n", + "print(\"Class instantiation OK.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3 -- Smoke test\n", + "\n", + "Run this after completing **all 3 fill-ins** above.\n", + "\n", + "What success looks like:\n", + "- Non-empty optimization history\n", + "- Finite objective values (no NaN or Inf)\n", + "- A 4-panel figure renders without error\n", + "\n", + "**How to read the 4-panel figure**: Inspect the panels for (1) design parameter\n", + "values, (2) the end-effector path in task space with the target marked,\n", + "(3) tracking error decreasing over simulation steps, and (4) joint torque\n", + "profiles showing actuation effort." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Smoke test (run after implementing all 3 PUBLIC FILL-IN blocks)\n", + "problem = PlanarManipulatorCoDesignProblem(\n", + " seed=42,\n", + " target_x=0.9,\n", + " target_y=0.45,\n", + " payload_kg=0.8,\n", + " disturbance_scale=0.04,\n", + " sim_steps=220,\n", + " max_iter=40,\n", + ")\n", + "start, _ = problem.random_design()\n", + "\n", + "cfg = {\n", + " \"target_x\": 0.9,\n", + " \"target_y\": 0.45,\n", + " \"payload_kg\": 0.8,\n", + " \"disturbance_scale\": 0.04,\n", + " \"sim_steps\": 220,\n", + " \"dt\": 1.0 / 120.0,\n", + " \"torque_limit\": 12.0,\n", + " \"max_iter\": 40,\n", + "}\n", + "\n", + "print(\"design space:\", problem.design_space)\n", + "print(\"objectives:\", problem.objectives)\n", + "print(\"conditions:\", problem.conditions)\n", + "\n", + "viol = problem.check_constraints(start, config=cfg)\n", + "print(\"constraint violations:\", len(viol))\n", + "\n", + "obj0 = problem.simulate(start, config=cfg)\n", + "opt_design, history = problem.optimize(start, config=cfg)\n", + "objf = problem.simulate(opt_design, config=cfg)\n", + "\n", + "print(\"initial objectives [tracking_error_m, energy_J]:\", obj0.tolist())\n", + "print(\"final objectives [tracking_error_m, energy_J]:\", objf.tolist())\n", + "print(\"optimization steps:\", len(history))\n", + "\n", + "# CHECKPOINT\n", + "assert len(history) > 0, \"Optimization history should not be empty\"\n", + "assert np.all(np.isfinite(obj0)), \"Initial objective contains non-finite values\"\n", + "assert np.all(np.isfinite(objf)), \"Final objective contains non-finite values\"\n", + "print(\"All assertions passed.\")\n", + "\n", + "problem.render(opt_design)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The power of a standardized interface\n", + "\n", + "Notice what just happened: we wrapped a completely new simulator (PyBullet robotics)\n", + "as an EngiBench 
`Problem`, and it exposes the same interface as `beams2d`,\n", + "`heatconduction2d`, or any other problem in the benchmark.\n", + "\n", + "This means that **every generative model in EngiOpt** -- the CGAN you trained in\n", + "Notebook 01, the diffusion models, the VAEs -- could be trained on this manipulator\n", + "problem **with zero model code changes**. You would only need to point the training\n", + "script at the new problem ID.\n", + "\n", + "That is the core value proposition of EngiBench: **decouple the problem from the\n", + "method** so researchers can focus on one or the other without rewriting glue code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Contributing to EngiBench: what you need\n", + "\n", + "If you have an engineering problem from your own domain that you would like to\n", + "contribute to the benchmark, here is the checklist:\n", + "\n", + "1. **Design space**: Define a `gymnasium.spaces.Box` (or `Dict`) for the design variables, with physically meaningful bounds.\n", + "\n", + "2. **Simulator**: Implement `simulate(design, config) -> objective_values`. This is the core -- it maps a design to measurable performance. Must be deterministic for a given seed.\n", + "\n", + "3. **Constraints**: Define constraint functions using the `@constraint` decorator. Each should `assert` what must be true for a design to be feasible.\n", + "\n", + "4. **Dataset**: Generate a dataset of (design, conditions, objectives) tuples and host it on HuggingFace. This is what generative models train on.\n", + "\n", + "5. **Render method**: A visualization that helps humans interpret designs. Not strictly required for training, but essential for debugging and papers.\n", + "\n", + "6. 
**Metadata**: Version number, objective names and directions, condition ranges, and a docstring explaining the problem physics.\n", + "\n", + "See the [EngiBench contribution guide](https://github.com/IDEALLab/EngiBench) for the full template and review process." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Takeaways\n", + "\n", + "Before closing, reflect on these questions:\n", + "\n", + "1. **What are the minimum requirements** for adding a new problem to EngiBench? Which methods and attributes are essential vs. nice-to-have?\n", + "\n", + "2. **Which part of the Problem interface** was most intuitive? Which was least intuitive? (For example: design_space, constraints, simulate, render, optimize...)\n", + "\n", + "3. **What engineering problem from YOUR domain** could you contribute as a benchmark? What would the design vector look like? What would you simulate?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optional extension -- Train an EngiOpt model on this problem\n", + "\n", + "The solutions notebook contains a full optional extension that:\n", + "\n", + "1. Generates a feasible dataset from simulator rollouts\n", + "2. Trains `engiopt.cgan_1d` (the same model architecture from Notebook 01) on the manipulator problem\n", + "3. Compares generated designs vs. a random baseline\n", + "\n", + "This demonstrates the key point: because our manipulator problem uses the standard\n", + "EngiBench interface, we can reuse EngiOpt model code directly.\n", + "\n", + "To try it yourself, see the **solutions notebook**:\n", + "`workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb`\n", + "\n", + "The essential idea in ~10 lines of pseudocode:\n", + "\n", + "```python\n", + "# 1. 
Generate dataset\n", + "for _ in range(N_SAMPLES):\n", + " design, _ = problem.random_design()\n", + " if problem.check_constraints(design, cfg) == []:\n", + " obj = problem.simulate(design, cfg)\n", + " dataset.append((design, conditions, obj))\n", + "\n", + "# 2. Train CGAN on top-performing designs\n", + "generator = cgan1d.Generator(latent_dim=8, n_conds=4, design_shape=(6,), ...)\n", + "# ... standard GAN training loop ...\n", + "\n", + "# 3. Generate + evaluate\n", + "new_design = generator(z, conditions)\n", + "obj = problem.simulate(new_design, cfg) # same interface!\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Troubleshooting\n", + "\n", + "- **`NotImplementedError`**: You have not yet filled in one of the 3 exercises. Check `simulate()`, `random_design()`, and `optimize()`.\n", + "- **`AssertionError` in smoke test**: Your fill-in runs but produces incorrect values. Re-read the hints in the `# START FILL` block.\n", + "- **PyBullet connection error**: Make sure `pybullet` is installed. On Colab, the bootstrap cell handles this.\n", + "- **If a section fails, do not continue downstream.** Fix locally first, then rerun." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/requirements-colab.txt b/workshops/dcc26/requirements-colab.txt new file mode 100644 index 0000000..aee3142 --- /dev/null +++ b/workshops/dcc26/requirements-colab.txt @@ -0,0 +1,15 @@ +# Local convenience snapshot for workshop notebooks. +# Source of truth for Colab is the install/bootstrap cell inside each notebook. +# EngiOpt is intentionally installed from Git in notebook bootstrap cells. 
+ +engibench[beams2d] +sqlitedict +matplotlib +seaborn +gymnasium +pybullet +tqdm +tyro +wandb +torch +torchvision diff --git a/workshops/dcc26/solutions/00_setup_api_warmup.ipynb b/workshops/dcc26/solutions/00_setup_api_warmup.ipynb new file mode 100644 index 0000000..24151d9 --- /dev/null +++ b/workshops/dcc26/solutions/00_setup_api_warmup.ipynb @@ -0,0 +1,586 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Welcome to the DCC'26 EngiBench Workshop!\n", + "\n", + "In the next 20 minutes you will **load an engineering-design benchmark, explore its data, and break its constraints on purpose**. No ML required yet — just Python and curiosity.\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"seaborn\", \"ipywidgets\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is EngiBench?\n", + "\n", + "\n", + "\n", + "EngiBench is an **open benchmark suite for engineering design** with ML. Three things it gives you:\n", + "\n", + "- **Standardised problems** — beams, heat sinks, photonic crystals, and more, each with the same Python API\n", + "- **Ready-made datasets** — thousands of optimal designs with their operating conditions, hosted on HuggingFace\n", + "- **Built-in evaluation** — constraint checking, simulation, and metrics so results are comparable across papers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exercise legend\n", + "\n", + "| Marker | Meaning |\n", + "|---|---|\n", + "| `PUBLIC FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n", + "| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1 — Choose a problem and set up\n", + "\n", + "EngiBench has many problems, all with the **same API**. Pick one by name from the list below.\n", + "\n", + "**Your task:** set `PROBLEM_ID` to one of the available problem strings (we recommend `\"beams2d\"` for this workshop)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-A\n", + "import importlib\n", + "import random, sys, os\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Import workshop helpers (all visualization code lives here)\n", + "if \"google.colab\" in sys.modules:\n", + " import subprocess\n", + " _utils = \"/content/workshop_utils\"\n", + " os.makedirs(_utils, exist_ok=True)\n", + " _branch = \"codex/dcc26-workshop-notebooks\"\n", + " _base = f\"https://raw.githubusercontent.com/IDEALLab/EngiOpt/{_branch}/workshops/dcc26/utils\"\n", + " for _f in (\"notebook_helpers.py\", \"__init__.py\"):\n", + " if not os.path.exists(f\"{_utils}/{_f}\"):\n", + " subprocess.check_call([\"wget\", \"-q\", f\"{_base}/{_f}\", \"-O\", f\"{_utils}/{_f}\"])\n", + "else:\n", + " _utils = os.path.abspath(\"../utils\") if os.path.isdir(\"../utils\") else \"workshops/dcc26/utils\"\n", + "sys.path.insert(0, _utils)\n", + "import notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers) # always pick up latest edits\n", + "from notebook_helpers import * # noqa: F401,F403\n", + "\n", + "import engibench\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "print(\"Available problems:\", list(BUILTIN_PROBLEMS.keys()))\n", + "\n", + "SEED = 7\n", + "set_global_seed(SEED)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "PROBLEM_ID = \"beams2d\"\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if PROBLEM_ID is None:\n", + " raise RuntimeError('Set PROBLEM_ID to a problem name, e.g. PROBLEM_ID = \"beams2d\"')\n", + "\n", + "# CHECKPOINT\n", + "assert PROBLEM_ID in BUILTIN_PROBLEMS, f'\"{PROBLEM_ID}\" not found. 
Choose from: {list(BUILTIN_PROBLEMS.keys())}'\n", + "print(f\"\\u2705 Checkpoint passed — using problem: {PROBLEM_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 2 — Instantiate the problem\n", + "\n", + "One line. Every EngiBench problem uses the same constructor — just pass a seed for reproducibility." + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Problem class: Beams2D\n" + ] + } + ], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\n", + "print(\"Problem class:\", type(problem).__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 3 — Explore the API contract\n", + "\n", + "Every EngiBench problem exposes the **same fields**. This is what makes the benchmark fair — algorithms can only change the *method*, not the *problem definition*." + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Problem class: Beams2D\n", + "Design space: Box(0.0, 1.0, (50, 100), float64)\n", + "Design shape: (50, 100)\n", + "Objectives: (('c', ),)\n", + "Condition keys: ['volfrac', 'rmin', 'forcedist', 'overhang_constraint']\n", + "Dataset ID: IDEALLab/beams_2d_50_100_v0\n" + ] + } + ], + "source": [ + "print(\"Problem class: \", type(problem).__name__)\n", + "print(\"Design space: \", problem.design_space)\n", + "print(\"Design shape: \", problem.design_space.shape)\n", + "print(\"Objectives: \", problem.objectives)\n", + "print(\"Condition keys: \", problem.conditions_keys)\n", + "print(\"Dataset ID: \", problem.dataset_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Key takeaway:** `design_space`, `objectives`, and `conditions_keys` are the **contract**. Any method you build must respect them." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 4 — Load and inspect a dataset sample\n", + "\n", + "The dataset lives on HuggingFace and downloads automatically. Your job:\n", + "1. Grab one training sample's **design** (a 2D numpy array)\n", + "2. Build a **config** dict mapping each condition key to the sample's value\n", + "\n", + "We give you the dataset loading — you extract the fields." + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 3880\n", + " })\n", + " val: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 728\n", + " })\n", + " test: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 243\n", + " })\n", + "})\n", + "design shape: (50, 100)\n", + "config: {'volfrac': array(0.2375), 'rmin': array(3.5), 'forcedist': array(1.), 'overhang_constraint': array(0)}\n", + "✅ Checkpoint passed — dataset sample loaded correctly.\n" + ] + } + ], + "source": [ + "# PUBLIC FILL-IN CELL 00-B\n", + "# Goal: extract a design array and build a config dict from one training sample.\n", + "\n", + "dataset = problem.dataset # <-- this is provided for you\n", + "print(dataset) # inspect the splits and columns\n", + "\n", + "sample_idx = 0\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "design = np.array(dataset[\"train\"][\"optimal_design\"][sample_idx])\n", + "config = {k: np.asarray(dataset[\"train\"][k][sample_idx]) for k in problem.conditions_keys}\n", + "# END FILL 
-----------------------------------------------------------------\n", + "\n", + "if design is None or config is None:\n", + " raise RuntimeError(\"Uncomment / fill in `design` and `config` above.\")\n", + "\n", + "print(\"design shape:\", np.array(design).shape)\n", + "print(\"config: \", config)\n", + "\n", + "# CHECKPOINT\n", + "assert tuple(np.array(design).shape) == tuple(problem.design_space.shape), (\n", + " f\"design shape mismatch: expected {problem.design_space.shape}, got {np.array(design).shape}\"\n", + ")\n", + "missing = [k for k in problem.conditions_keys if k not in config]\n", + "assert not missing, f\"config missing condition keys: {missing}\"\n", + "print(\"\\u2705 Checkpoint passed — dataset sample loaded correctly.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 5 — Design gallery\n", + "\n", + "Eight random training designs with their conditions. Notice how different conditions produce very different structures." + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABjUAAAKCCAYAAACZA1vXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAzWNJREFUeJzs3QeYFFXW+P9LhiEPQ4YhSVRAkpEgGAATJsxhzTntu+qurznguu6a1lVXVsWc3l3MCUWiASQpoOQw5DDAkBmg/s+5++/+1T12V3fPDDNdPd/P86B9u6qrq6un76mu2/ecCp7neQYAAAAAAAAAACDNVSzrHQAAAAAAAAAAAEgGgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAABIO8ccc4xp3bq1CaOlS5eaChUqmPvuu6/I2/jd735nt5GuSmP/SuI4AgAAAMg8DGoAAABkiMWLF5urrrrKdOrUyWRlZZn69eubzp07m0suucR88803Zb17aeHJJ580o0aNKuvdAFIif7MywOP/V7NmTdOhQwdz0003mby8PJNpNm3aZJ566ilzwgknmJYtW5oaNWqYjh072j4u1uuVwS//8alatapp2LChOeKII8ytt95qfvrppzJ5HQAAACh5lQ/ANgEAAFDKfvzxRzNgwABTpUoVc/HFF5uDDz7Y7Ny50yxYsMB8+eWXpnbt2mbgwIGmvJNBDZkBIjMNDpRWrVrZY1+5ctFPtUeOHGmef/55U56VxHHMNDKA0adPH3t769atZurUqeYf//iHGT16tJk7d679nGeKH374wfzP//yPOfbYY80NN9xgcnJyzOzZs80///lP8+6775pvv/3WdOnS5TePe+CBB0ybNm3Mvn377MDIzJkzzYsvvmgHSH7/+9+bv/71r2XyegAAAFBy+IYAAACQAe6//36zY8cOewGve/fuv1m+Zs2aMtmvsJMLx6leKJZfiVevXr1YzyuDU/KvPCuJ45hp+vXrZ84666xo+9prrzX16tUzTzzxhPnqq6/M6aefbjKFzDibN2+eadeunXP/SSedZI4//nhzzz33mP/7v//7zeOGDh1qevfu7dz3+OOPm+HDh5u//e1vdvbGHXfcccD3HwAAAAcO6acAAAAygMzIaNCgQcwBDdGkSROn/c4775hTTz3V5ObmmmrVqtlfQZ922mkxU7TIzAapcTFr1ixz3HHHmVq1aplGjRrZX1Hv3bvX7Nq1y/zhD38wzZs3txeh+/fvb3755ZeY6XPkwqukiZFf4cvzduvWzbz99tspvc6LLrrING3a1KaXkX277bbbzPbt2xM+Vp5/2bJlZvz48U6aGqnd4H+dM2bMMIMHDzZ169a1+xcZ3LjrrrvM4Ycfbo+V7PtBBx1k/vjHP9rBpES1IPz3ffzxx/bX9nKs5HXI/stxTFSzInLfli1b7MVseQ9kG0cffbT9Vbu2ceNGc9lll9m/C3nPBg0aZF9bKvVK5L2V/WvWrJlN/3PYYYfZmT/FfX8kfZDsW+TvQF7LUUcdZV555ZXA4yjkeMsv7uU5ZJ8kvdDXX38d85hFXuuqVavMeeedZ1OySWo2eX/nz5//m9cqzyUpjmQdGSzo2rWr3f90Ju+NkOPt53meee6550yvXr3s65G/AZmtFSsV3bPPPmvTPMlnWLYjx/bCCy+Mfjb85BjLsR47dqw58sgj7bZbtGhhHn30UbtcZkdcfvnl9j2VZSeffLI9/n75+fk2JZQMWMjfsPyNyn4+9thj0XXkfdMDGkL6oOzsbDtrI1my/nvvvWfq1KljHnnkkaT6CwAAAKQvZmoAAABkALn4J79q/s9//mPOOOOMhOs/88wz9kKi5KeXAY9FixaZF154wV4gnz59umnfvr2z/ooVK+yvo8855xz
7S3G5sC2/fpbUQHPmzLFpguQC/4YNG2x6FxkgkYGNihXd39DIL6TlguJ1111n2y+//LK92CwXlBOlhJo2bZq9MC8Xm6+++mp7AVYGWp5++mkzefJkO1gRNLvhtddesxdSZVDif//3f6P3yy+3I5YvX26fQ37VfeaZZ5pt27bZ+1euXGn+9a9/2fvOP/98+7rl+f7yl7/YgYIvvvjCJOPTTz+1F5CvueYae1H/gw8+sMdLLrbfeeedSW1DLsjLPssv1WXgQt4H+fX6kiVLorNKdu/ebS/+yswdOa4yGCEDVpELwsmS9+b99983p5xyin1e+TuRvy9J71PU90cGcORvSY6p/B1IXQgZqJH9mzhxoq0BE0TeGzmO8jcmr0det8xQiLVPQv7eZKBNBj9GjBhh15dURMOGDbMXxitVqmTXu/76681LL71k07fJoInspwzSyMX7dCGDa/IZE/K3KWnnZCBABtjk2PvJ4NJbb71lP6+XXnqp/Zt444037LGXfkIGNSPkb1COj6S3igwYyN+7vPaff/7Z9hV+8jf/0Ucf2f5Djpekg5LPvwxQyMCUDEjIANHChQvt+y/ryICm/z2cMGGC/RzIwKH0H9JfjBs3LuEgkvytyHE45JBDUjp28rrk70T2b9KkSfbvGQAAACHlAQAAIPS+/fZbr0qVKp6c3rVv39679NJLvWeffdabO3duzPW3bdv2m/tk3apVq3rXXnutc3+rVq3sdt99913n/p49e3oVKlTwTj31VG///v3R+5966im7/ueffx697+WXX7b35ebmeps3b47eL7flvvr163s7duyI3j9gwAD7vH7dunXzOnbs6BUUFDj3/+c//7HbludIRLYp2463TLYzcuTI3yzbvXu3t2fPnt/cf9ddd9nH/PDDD9H7lixZYu+79957f3NfVlaWvR0hx+3ggw/2mjRp4mz3kksusevHuk+/P/K+yP3PP/989L5//OMf9r6HHnrIWTdyvz62sXzxxRd2XXlev9GjR9v79f4l+/7MmjXLth999NHA5491HD/55BN73xVXXOGsG7lf75O817Ge6y9/+ctv/kblb3Do0KFeOop8fmL9O/roo73Vq1fHPOb//Oc/nfsLCwu9Xr16ea1bt3Y+s7H6g6+++irmsZP75HP//fffO58P+RuW+2+88UZn/VtvvdU+5tdff41+5mP9HSfrtttus49/8cUXnfvl70Tunzp1atzH/u1vf7PrPP3000V6bgAAAKQH0k8BAABkAEkDI7+Ul1+5yy+ZZQaE/ApeCunKr9QXL17srF+zZk37f7lGWVBQYH/9Lb/+l9Q7sVIZya/u5dfVfn379rWPv/HGG520P5L3X8iv3DVJmyRpnSLktvxaW1LWyK+045Ffi8sv+WWWhPziXPY38k/2Q15PUFqkVH7NLb9q1yQlT2QWiPyCX/ZXnltmCohYxywWmV3gT/0kx01SAknNk8iskERktolf5Bf6/uMtv6KXGQg333yzs+4VV1zhHP8gMkND6F/Oy2uQv5Oivj+R55c0SOvWrUtqX/yvS8hMCr8TTzzRdO7cOeZjZLaQzEBIdMxkv2TWUSppjUqbzM4ZM2aM/Sfvj7TluMtMHfmbjHj99dftrB15r/zvxebNm+2sG0kr5X/tkf5g//79tv+QdSWVnRyTWH/b0t9IKjb/50NmA0l/oI+17g8kZZikHJPtxkpvFURqaMiskiFDhsT8nCYi6aeE9HkAAAAIL9JPAQAAZAjJ/y+1K0SkdoSkkJGUPpJqRwY9Inn3JX3M3XffbQcSdH75WGl8Yt0nKZNiLYvcL6mRtFgXnmXgReiBF79IjY57773X/otl7dq1piTSeEXSEWmSNur555+3F77l4q+f/4JykLZt2/7mvkhqHzleUvcg1W34Hx8hKZak1oLenrz/8n4ls7/yfsiAgKSHivU+Srqzorw/UkdD0n9JbQOp3XDooYeaY4891g6aSa2RIPK6ZJ8k3ZImAy26lou
Q46ALjsc6Zk8++aRN2SSfIznGMtgkAwDyT6dR02RQqriDabomRiyyb5GBNCGfa/n8nHvuubamxZ///Gd7vxwHSdHUuHHjuNuS9yPy3kqaqQceeMAONEgqOL9Yfyux/o6T7Q/kdcqxlgE3WVf2XwaZZABG/g7ikZRjF1xwga29ITWBdP2UZEQGMyKDGwAAAAgnBjUAAAAykFw4ljz2cpFWfiktNQ2mTJlifzUvdSNk9oZc2JOBDbkYLL/UlouEt9xyS8wZA/Eu9Act+2+mmpIR2ZYUJ5dfaccSuXhaHFLYOBapWyHPLcWU5ZfocqFcLs5KXQipWaEHOeIJOo7JHq/SON4H+v156KGHbE2RTz75xA66yeCb1Ia4/fbbowWng6RyQTvZYy4DBDJzQC6ey4Cg1IB48cUX7edHbgcNOsjgTHHIrBUpal4UkdoQ/tof8rpk5tWbb74Z93GRmhRTp061f9cyUCSDIjLQILMp5BjLYEmsv+3i9gcyO0uOt7z/cqxlBobU+ZGaPW+//fZvHvv555/bWi4HH3ywnfFT1EEJmdUi9EwjAAAAhAuDGgAAABlMLkxKmhgZ1JAL8GL06NF24OLDDz+0v0b3k19TS2qYA0V+QS4XM/3mzp0b99ffEZHC5XLB1P9L9VQV5dfdkSLjkjbqs88+c361Lxdb05Hsq1yIl/fZP1ujsLDQznaQYt6JyPshF7Tnz59vLyb76RkRRXl/ZPuSukz+yewAuTgvhddlYKRRo0ZxX5fsk6Qy0rN+/DNHijNj4sILL7T/5CK8FL+WfZKC7jr9mp+kgyoOSfVUVPKeCpmZ4X8/5H2T4t+JZv/IwMe+ffvs37Z/loXM4Ep2BlJRyECQpEOTf/L8kcLm8v77Z+zIZ0xmcXTq1Mn+TRd18DI/P9/2fZJSSwZ3AQAAEF7U1AAAAMgAclFVaj1oO3fujNYyiKR5ivySWv+yf+TIkcVOo5PIc889Z3P2R8htSekkF9kHDBgQ93E9evSwvyyXdWOlqZLXLhctE5ELvMmsp8kxkwER/zGT54yk+0k3kjJJLhQ/9dRTv3mP/cc/SGTwSWZQ+EktBz2AkMr7I88fuRAfIemhIoMUQRfS5XWJJ554wrlfZlfESj2VLDlWUm/CT95veV0i0d+MDOQU519xZhlFap9IWqYImaUlgz9/+tOfEqZqi9cfjBgxIukZSKnYsWOH/ecn+9CtW7ffHGvpu04//XQ7s+Lrr7+2g05FIduUQSlJPyWpz+LNyAIAAEA4MFMDAAAgA0jxaJllceqpp9q8+3LRLi8vz/4KW36xLRc55X4xdOhQu1x+GX3DDTfYC6oyk0MuDEtNiViDIyUlJyfHzhyJFPmVguaSDkvSDwVdaJQLzDJbQnLvy8VPSV0kswfk4ujChQvNf/7zH1ujQVJBBZFfrktKIUm7JRfRZdaFXCiPFEqO56yzzrIXiOXYSRocuTgqxzZSPDzdyK/f//nPf5q77rrLHh8p4iypd959912bZiiZ91hmTsixeeWVV+xFYUkrtWjRIrtdGcDwF9RO5f2RVEtXXXWVOfPMM+3Fahloknov8jcgfxtBqYGkILjslwzORAq1y8yTF154wT5vJL1QqmSWg8wckM+PDGTITBHZrgzCyecjMphS1iRVV6TmhRxbOW7yGZJ0THKx3v/3Kp8xSek0ffp0c/LJJ9vP3ooVK8x3331n35PI4JMMGsggkRxbeV8kzZYMksqxlMeUNOmPZABTnlf+juT4yoCUHGuZKRIpLP7jjz/agTUZbJHXIjNJNJlRo8l6v/76qx2QkQEyqR8kMzTkPZai97rwPQAAAMKHQQ0AAIAMIDUfJEXOpEmTzL///W/7q3NJsyIXeu+44w7nYr8MXMiFvzvvvNP+Glt+JX300Ufb3PYyyCF1BQ4UqZcgF2b/8Y9/RAsVv/HGG+b8889P+FgpKC0XKOXiuKTOklkBtWvXtimJ5PUFFRmOePjhh+0Fenl+OUZywVQuXic
a1JALobKuDIhIgeMmTZrY/P9ysTUyAyadSAox+WW77Lf8XchghgwYyH0y4KF/KR+PFGSWgRF5j+RCtwyMyQCFDOj4BzVSeX8k1ZIMDEmRetmuzJLIzc21f4+SeiiIDJ7I37dcwJdURfJ3LH/jctFaCrlLWqqikAE1qScjxyeStisyyCGDWVJDJR08/fTT0dvyuZVC4PJ3KIN0uqD7Sy+9ZNPLyYCPvCd79uyxf7c9e/a07Qj57MsxffDBB+12pJ6GDBZJfyC1d0pay5Yt7aCXDG7JLJPdu3eb5s2bmyuvvNL2VZHBTfn7igzgyKBtLLEGNe655x77fxlwlMEeSXMmzyef1chsEAAAAIRbBa8sKwoCAACgXBg1apS9qFicgsgoPhlAiMyWSdd6IEUlAy6S1kp+pQ8AAAAgc1FTAwAAAMhAUk9Fk9kTMkPl+OOPN5n0uj755BP7y/4wvy4AAAAAySH9FAAAAJCBJJ2PpO856qijbDoqqaUgaaOkpobUTgirBx54wKa5ktRKkmJt5syZNtVSgwYNbPoiAAAAAJmNQQ0AAAAgA51wwgm2dojUSpAaEVJ/QeppSFtqXYSVFJKWwvaPPfaY2bJli8nOzrZFx+V1tWjRoqx3DwAAAMABRk0NAAAAAAAAAAAQCtTUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqIHQat26tXn//fej7ZEjR5qmTZuaWrVqmRkzZpTpvgEA0gsxAwCQDOIFACAZxAugbDGogYxQWFhobrrpJvPuu++abdu2mR49epTp/kyePNl0797dZGVlmUMPPdR89913cdedPn266dWrl8nOzjb16tUzRx11lJkwYYKzzooVK8zw4cPtcvk3ePDgUngVAJCZMjlmfPbZZ6Zr166mfv36dp3jjz/e/Pzzz6X0SgAgs4Q5XnzyySemf//+Nh40atTInHXWWfY7hZ9cjGvfvr3dXt++fc2vv/5aCq8CADJPJseLcePGmQoVKtjBmsi/G264oZReCRAfgxrICGvWrDG7du2yF3Ji8TzP7Nu3r1T2JT8/35x88sm2k9+0aZO5/vrrbXvz5s0x12/VqpX5z3/+YzZu3GjX/8Mf/mBOOukks3PnTrt8+/btZuDAgTYg5eXlmQ0bNpiHHnqoVF4LAGSiTI4Z8qXlyy+/tMvWrVtnl51++uml8loAINO
EOV5s2bLF3HHHHfb7w5IlS0ydOnXM2WefHV0+b948c8EFF5gnnnjCbnvQoEFm2LBhZu/evaXyegAgk2RyvBB169a1gzWRf88880ypvBYgCIMaKFNyEi0n0H7vvPOO6dSpk+30//a3v5l27drZX5sOGTLELF68+DfbkGl9sr5o0aKFXT8yFfCRRx4xRxxxhB2dnjt3rnn99dfNIYccYmrXrm1yc3PN3XffbZ/HH4guvPBCO2VQfgEro9WRC0XJGj16tGnevLm58sorTbVq1ez/mzRpYu+PpUGDBvYilYx8y75UqlTJBgnZFzFq1CiTk5Nj7rrrLrvflStXNn369ElpnwAgExAzEscM2Rf5JyLLly5dan89BgDlBfHCmPPPP98ObMsvamvWrGluueUW88MPP0QHLWSf5YdTcqGrevXqdp9lMHzixIkp7RcAhBnxInG8ANIVgxooU9J5Tpo0yY4IR7z22mvmoosusv9//PHH7bToVatWmYMPPticcsopv+lYZVrfnDlz7G2ZIrdo0aLoMhkQeOWVV+wFn44dO9qLQfIL14KCAvPhhx+aF154wbz55pt23f3799vty6CBBBuZETFixAhTseJ/PyZywh9J/xTr3/Lly+16P/30k/2lrJ+05f4gso2qVaua0047zVx88cWmTZs29v7x48fbwDh06FAbSCXtyKefflrMIw8A4UPMSBwzhGxblstFqptvvtn86U9/MlWqVCnGkQeAcCFe/JZ8p+jcubPdj1jbkzjRpUuXpLcHAJmAeJE4XgjZ/2bNmtlrUzLLb+XKlUU42kDJYlADZapx48bmuOOOM2+88YZty6+DxowZEw0gkpNQpu/JhRnpzCXQTJkyJentX3vttTZwyC9V5eKPDAx06NDB/sJVOvXzzjvP5gcUU6dONb/88ot57rnnbC5B6cAlt6yMbIuPP/7YTteL909G2SOdvQQUP2lv3bo1cF9lG7KOvO5+/fo5Uwcl6F199dVm7dq1diRfchwuXLgwhSMNAOFHzEgcM4RsO/I8Tz/9tOndu3fSxwAAMgHx4re/IpbvEPKL5IjibA8AMgXxInG8kFkoM2fOtK/9xx9/tDNLZPBFBmGAssSgBsqc/MJUgoV46623bNFT6YxlhFum60VIRy4jw7rAXZBIpx7xxRdf2O1LOifJCfj888/b0W+xbNkyO0WvRo0axXo9MmVPchL6SVumFyYizy1TDSWAyK8FItuTfZZf48ovqOT/MltDcqYDQHlDzAiOGX6yjeuuu85ceumlNj8uAJQnxIv/+vnnn+1FNMl/fvzxxxd7ewCQaYgXwfFCUldJyiwZmJHbMrtk1qxZZv78+cXaT6C4GNRAmZOCdBIUpk2bFp3mJ2Ram+QBj9izZ4+d8if3JysyTS/y+DPOOMPOeJCpctKpX3PNNdH8hZKjXO6X4k6xSOcuwSHev8hUv27dutlRbD9pxysYFYvkPl+wYIG9LQXCAQD/RcwIjhma7K/so//YAEB5QLz47wUq+QWy5HSXQXA/vT2JJZLuJJX4AwCZgHgRHC80mWUCpAUPSAOXXXaZN3jwYK9GjRpeQUGBvW/UqFFeixYtvDlz5ni7du3ybrvtNq9Tp05eYWGhXd6qVStv9OjR9vaSJUskCnibNm2KbtO/XMh2K1as6H300Ue2/f3333sNGzb0hg0bZtv79u3zevbs6V166aV2O/I8EydOtM+dio0bN3r16tXz/vWvf3m7d++2/8/Ozvby8/Njri/7M2vWLPt827dv9x5++GF7HBYuXGiXy/+zsrLserKP8n9pR5YDQHlDzIgfM9566y1vwYIFdv9kv66//novJyfH27x5c4pHGQDCrzzHi9mzZ3uNGjXyXnjhhZjLf/31V/ud4pNPPrH7cu+993rt27ePHgcAKE+IF/HjxdixY73Fixd7+/fv9zZs2OBddNFFXteuXb29e/emtF9ASWOmBtJmup9Mw5PUSpEpcXLfjTfeaIshyRQ3md7
20UcfOcWKUiHb/cc//mGuuuoqU6dOHfPwww+bc845xxlBl+3v2LHD5jyU6YB33XVXynkCpZi3bOepp56y0wkln7m0JSeikNFz/yi6TDUcPny4zXEoUxMlf+Mnn3xi2rVrZ5fL///v//7P3H777Xa/ZZ/+/e9/R5cDQHlDzIgfM+TXZDJdXPZf8vVKW9aRbQNAeVOe48Vf//pXs379enPrrbfG/CWv7Mvrr79ubr75ZhtTJFZI0dqiHgcACDPiRfx4IXU2+vfvb++TNFRSKF3qe0g6KqAsVZCRjTLdAwAAAAAAAAAAgCQwUwMAAAAAAAAAAIQCgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1kNEOPvhg8/HHH5f1bgAAQoCYAQBIBvECAJAM4gVw4DCogYw2Z84cc/LJJx+w7e/cudMcdNBBpl69eoHrFRQUmPPPP9/UqVPHNG7c2Dz44IMpLQcAhDNm7N6921x55ZWmTZs2pnbt2qZTp07mpZdeirv+8uXLTa1atZx/lStXNqeeemp0nWOOOcZUq1bNWWfVqlUlut8AgPSOF4LvGABQPq9J3XjjjaZly5a2f2/evLm55ZZbzJ49e+KuT7xAJqpc1jsAJMvzPLN//35TqVIlky7uuece06pVK7Nhw4aEASc/P99erFq3bp057rjj7OMuvvjipJYDAMIZM/bu3WuaNm1qvvrqK9O2bVvzww8/mKFDh5oWLVqYE0444Tfr5+bmmm3btkXb8uWkWbNm5txzz3XWe/TRR+2XFwBA+YwXgu8YAFD+4oW47rrrzJ///GdTs2ZNez1q+PDh5i9/+Yu56667Yq5PvEAmYqYG0lrr1q3NI488Yo444giTlZVlf636zDPPmC5dutjO+6KLLjKbNm0y55xzjh1R7tGjh/n111+dx7///vv29qhRo8yhhx5qR5wbNWpkR5+ffPLJIu/btGnTzOeff27uuOOOwPV27Nhh3n77bfPQQw/ZGR0dOnSwAePFF19MajkAILwxQ573gQceMO3atTMVKlSw+zZw4EAzadKkpB4v+yNfns4444yUnxsAkLnxgu8YAFA+44Xo3Lmzff7IYEvFihXNggULYq5LvECmYlADaU86/ldeeSX6y9UPPvjAntxLh/3ll1+aAQMGREeVJUDcfvvtgVP/JBCtXLnSvPPOO+a2224zixYtsstkm9KBx/snI+H+X1LJ9PB//OMfpmrVqoH7P2/ePPtLW9m3CLn9008/JbUcABDumOG3a9cuM2XKFNOtW7ekXo98mbjgggtM9erVnfvlS0d2drb94vTqq6+mcIQAAJkQL/iOAQDlO17ITA1JQysDJLNmzbL7EAvxApmK9FNIe9dee63p2LFjtP2HP/zBXsgREjxk6l/fvn1tW6bcXXXVVXG3lZOTY/7nf/4nmpNcRs1nzpxpfxEl29i8eXNS+/TYY4/ZC0n9+/c348aNC1xXAp+MoMuIfoQEpK1btya1HAAQ7pgRIb+iuuKKK0z79u2TmnmxbNkym4ZEppL7ya/F5Ndh8oVo7Nix5uyzz7b5108//fSU9gcAyrOwxwu+YwBA+Y4Xf/zjH+2/X375xbzxxhumSZMmMdcjXiBTMVMDaU/yi/vJFL0IuaCj2/5c5Jp/XSEdd6od9cKFC83zzz9vBzaSISPnMp1PZndEbNmyxV6ASmY5ACC8McN/gUp+XSW/hJIp6DJFPJGXX37ZDqB3797duf/II480devWNVWqVDGDBw82V199tf2lFwCg/MQLvmMAQPmOF/5UVPJ94Xe/+13M5cQLZCoGNZD2krnwUxImTpxoO/N4/6655prolMC1a9faPIMyyj5s2DBTUFBgb0tBP01G9OXCk0wHjJCR+K5duya1HAAQ3pgRuUB1/fXX2xghU9RlQCIRqaMhgxryS910ec0AkEnCHi/4jgEA5TdeaIWFhXFrahAvkKlIPwX8//r16xc4oh4haT6OO+64aPu
7776zF52k05dchpqM1EvRqLvvvtu89dZbZt26debvf/+7LQ6VzHIAQHhjhrjhhhvM5MmTbaqo+vXrJ/WYMWPGmA0bNpjzzjvPuV+mpH/77bd2unq1atVsCkSZPThy5MgivQ4AQDjjBd8xAKB8xgtZ57333rOpZ2Xwe/bs2bbenszgjoV4gUzFT/uAFEmH36JFi+i/hg0bmgoVKtjbkaLhQ4cONSNGjIg+5plnnrHBRtY5+uijzeWXX24uvvjipJcDAMJJ6mI8++yzNo1Iq1atYv7SSseMSIHws8466ze/0pVfYd1///02Z65c8Lr11lvN448/bvP3AgDKV7zgOwYAlD9y/enNN9+0dTgkRZRkDznppJPMk08+GV2HeIHyoIInc1wBAAAAAAAAAADSHDM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAOlZv369WbQoEGmTp06Zvjw4WW6L7fccov53e9+Z28vX77c1KpVy2zZsqVM9wkA8P8QMwAAySBeAACSQbwAMguDGig1//znP02lSpXM5s2bzXvvvWfSRW5urtm2bZupW7du4HpLly41FSpUsPtfnGMgz1ezZk1z0kknmdWrVweuP3fuXDN48GBTu3Ztk52dbS6//PLosvvuu89UrlzZBr/Iv3feeafI+wYA6YSYQcwAgGQQL1KPFxHnn3++fe6ZM2c6999///2mcePG9sLfBRdcYF8HAIQd8SK1eDFq1Ch7vPzfH/7yl78UeXtASWNQA6VmyZIl5uCDDzYVK6b+Z7d3714TdmPHjjV33HGHDZ7r1q2zXxTkS0I8q1atsr8iOPvss+36Ehyuv/56Z52TTz7ZBr/Iv3POOacUXgkAHHjEDGIGACSDeJFavIj45JNPzNq1a39z/8svv2xefPFFM3HiRPvr4Y0bN5qbbrrpAO09AJQe4kXq8aJr167O94fbb7+9WNsDShKDGigVMrXv1VdfNc8++6wd3ZUT5ddff9107tzZ1KtXz/Tt29dMnz49uv4xxxxjO8sTTjjBjvh+9tlnpqCgwNxwww2mVatW9ldDffr0MXl5eXZ96VxlmYwQN2rUyFx88cXO1L0JEybYzlie+4wzzjBbt26NO9o9ZswY061bN/tLV+mUr732Wnv/YYcdZv/fokULu5033ngjpWMgXxAuvPBCc/jhh9vX9Mgjj5jx48ebxYsXx1z/iSeesBeo5Je2NWrUMNWqVTM9e/ZM6TkBIIyIGcQMAEgG8SL1eCFkP2+99Vbz/PPP/2bZSy+9ZAcxOnToYI/hgw8+aN566y2zc+fOlPYLANIJ8aJo8aI0twekzANKySWXXOLdfPPN9vb48eO9WrVq2f/v2bPHe+KJJ7yGDRt6mzdvtssHDBhg2z/88IO3f/9+b8eOHd7pp5/uDR482Fu5cqW3b98+b/r06d769evt+sOHD/fOO+88b9OmTd62bdu8c88917vwwgvtsvz8fK9u3bre888/7xUWFnoffvihV7VqVbs/YsmSJZ58FOSxomnTpt6rr75qb8u2Jk+eHHO9CNl2vH9du3aNrtetWzdv5MiRzmObNWvmvf/++zGPV58+fbwrr7zSO+qoo7zs7Gyvb9++3vfffx9dfu+993q1a9e2y9q3b+/deeed3s6dO4v9PgFAOiBmEDMAIBnEi9Tihbj++uu9Bx54wN6W554xY0Z0WZ06dbwxY8ZE23IcZZ2ZM2cW6f0BgHRBvEgtXrz88ste9erV7XFo3bq1d+211zrPXZT4A5SkyqkPgwDF99prr9kR3f79+0eLJD333HN2GrTkdhXy/8hItIyIjx492ixbtsw0a9bM3tejR49osad///vfZsOGDXaEXTzwwAN2WqHkAPz444/tY66++mq77JRTTrG/Zo2nSpUqZuHChXa7DRs2NEcddVTga0k2n6GM3Ef2L0La/hF6v/z8fPurqM8//9z+AuCFF16wqUPmz59
v6tevb39pcMUVV9jXJnnU5XjKczz11FNJ7Q8AhAUx47+IGQAQjHiROF58++23Zty4cc4vkoO2J/udlZUVd3sAEEbEi8TxQo7Nzz//bNq2bWtf95VXXmkuueQS88EHHxRpe0BJI/0UysSKFStM69atnfvatGlj74+QaXsR0oFKKg3/ff6pevv377ePlw5U/skFHcmTuGbNGptnXKYH+um2nwSq2bNnm44dO9og9e6775qSINMD/dMPhbRlSmG89U877TRz9NFHm6pVq9qpjNWrVzffffedXS4BUqYdyus85JBDzIgRIyj6CiAjETP+i5gBAMGIF8HxYs+ePeaqq66yF+4kViSzPckjv2PHjrjxBwDCiHiR+PuFDGYcdNBB9nXIa3v66aftAI3EhKJsDyhpzNRAmZALK9Lx+0lb7o/wF2+SDn/37t02X2HLli2dx0lb1pVAIb8i0mREXAKQnxS9kzyHsUgOchlll6D0/vvv26KrAwYMiFtMSjryeGS/58yZY29LTsSZM2dGl0UKuUpexVi6d+9u9yFZRSl2BQBhQMwgZgBAMogXwfFCXssvv/xiTj/9dOf+gQMHmrvvvtv8/ve/j27v2GOPtcvktlzIkxobAJApiBeJv19okef/b+bC4m8PKC6+0aJMyDQ/KWo0efJk++ufv//972bjxo3mxBNPjLm+FEcaNmyYueaaa2wnKZ37jBkz7GOaNGlif50qv0qV6X5CRsNldFucdNJJZuXKlWbkyJH2uWQ64dixY2M+j/x6SaYhbtq0yXbYkal0lStXttP+5L5FixY5j5Epd/H+RYKHuPTSS20hqilTptiR7TvvvNMGJhn9jkWm9sm0vh9++MHs27fPFvKTIBqZeiivT16/mDdvnt3emWeeWYR3AwDSGzGDmAEAySBeBMcLufAmF9bkIlTkn5CZexJHItuTX+MuWLDA/uL2nnvusSlYatSoUcR3BQDSD/Ei8feLTz/91L5WITNYbr75ZjNkyBBbFLwo2wNKGoMaKBPS0UnQuPzyy02DBg3M22+/bT777LPf5OPze+WVV+yJeO/eve16Ekx27txpl0mewsgUvzp16ph+/fqZadOm2WXZ2dn2Qo/kDZd1/vWvf5kLLrgg7vO8+eabdoqdTJm78cYbbVv2UU7k7733XjN06FC7Hbk/FZIz8ZFHHjFnnHGGDUYyii9BNEJuS3qQiL59+9pjdO6559rne/XVV23wixyj9957z05HlIAi+zR48GDz17/+NaV9AoAwIGYQMwAgGcSL4HhRqVIl+ytk/z8hvxaOpAu57LLL7IUqSWcoy2WfqL8EINMQLxJ/v/jmm29s+iuZfXLkkUfawQoZcEl2e8CBVkGqhR/wZwEAAAAAAAAAACgmZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBopt3Lhxpl69emW9Gxll6NCh5tlnny3r3QCAEkW8KHnECwCZiHhR8ogXADIVMaPkETMQBgxqACXovvvuM6eddlqxt/PZZ5+Z6667Lql1R40aZQ499NAiP1dhYaG54YYbTP369U12dra58cYbzd69e4u8fqrbA4DyiHhBvACAZBAviBcAkCxiBjGjPGFQA8UinQXCfcweeughM2nSJDN37lwzZ84cM3HiRDNixIgir5/q9gCUD+nW94VBuh0z4gWA8tj3hUG6HTPiBYDy2v+FQbodM2IGisxDxlqzZo03fPhwLycnx2vZsqV35513eoWFhV63bt28V155xVl3yJAh3ogRI+ztrVu3etdff719TMOGDb2LLrrI27x5s122ZMkST/5sXnrpJa9du3Zeo0aNvG+++carW7euN3LkSK9FixZedna2d9ttt0W3vWzZMu+4446z+1GvXj3vxBNPtNuJuOSSS7wrrrjCO+ecc7xatWp5HTp0sNuM2LRpk3fWWWfZ5+jYsaP39NNP231Ixptvvmlfb+3atb3c3Fz
v5Zdftvfv37/f++tf/+q1bdvWq1+/vjd48GBv0aJF0ce1atXKe/TRR73DDz/c7lP//v295cuXRx97++23e40bN7bbbd++vffRRx95o0eP9qpUqeJVqlTJq1mzpv0XeX2XXXaZfS9kfdn/6dOne0cffbR9bjku5557rrdhw4bo8w8YMMB74okn7O2g4yvbqVatmlexYsXoc8rxToVs87333ou23333XXusirp+qtsDUPaIF8SLZBAvABAviBfJIF4AEMQMYkYyiBkoKgY1MtigQYO8888/3waEpUuXel26dPEefvhh77HHHrMdesTq1attxxfpIKWjO++882zHvW3bNtu5XXjhhU4AOe200+zy7du32w5OOrBbb73V27lzpzd37lwvKysrGgTkMZ9++qldtmXLFhsM/M8vHax0rLL+3r17vQcffNB24BHy3EOHDrVBbNWqVV6fPn2SCiAffvih7Wy//vprb9++fd7atWtthyskgDZr1sz76aef7H79/ve/t8dHAqyQ5+/atau3ePFiu1yeX/ZTfPHFF7aTXLlypW1Lhz1v3jx7+9577/WGDRvm7Ic8rkaNGt7nn39u90OO2cyZM72JEyd6e/bssYG+X79+NojGCyBBx1eCYvfu3Z3nlG1L0In379prr7Xr5efn22O5YMGC6GPnz59v74ucNPglWj/V7QFID8QL4gXxAkAyiBfEC+IFgGQRM4gZxAwcSAxqZKgVK1bYD610ThFvvPGGHcGVTlgChqwjHn/8cRtsxLp162xnJR2BvwOQ9aVzjwSQGTNmRJdLR1ahQgXbMUZIgJBR51jksTKSK51ppIOVEXG97zJKLM8pzz116lRnlDWZACIj/ffff3/MZbJ/f/7zn6PtXbt22SA2efLkaAB57rnnostff/1175BDDrG3x44da0eyv/zySxsA/OIFEH2fJiPqBx10UNwAEnR8YwWQZMlJgxzL9evXR++TvwG5Ly8vL+X1U90egLJHvCBeJIN4AYB4QbxIBvECgCBmEDOSQcxAcVBTI0OtWLHCVK9e3TRu3Dh6X9u2be39TZs2NYMGDTJvvPGGvf/VV181F198sb29dOlSs3//ftOmTRtTr149+69Pnz6mYsWKZs2aNdFt5ebmOs9Xp04dk5WVFW3XrFnTbN261d5ev369Of/8803Lli3tev379ze7d++OLhdNmjRxHitk+YYNG2y+P3lsvOeOZ9myZaZ9+/Zxj0/r1q2j7WrVqplmzZrZ++PtU2R/Bw4caO6//35z9913m5ycHHPmmWeaJUuWBO6L3ueFCxeaYcOG2eeUY3LhhRfa1xpP0PEtjlq1atn/b9myJXpf5Hbt2rVTXj/V7QEoe8QL4kUyiBcAiBfEi2QQLwAIYgYxIxnEDBQHgxoZqkWLFmbXrl1m7dq10fskOMj94qKLLjKvvfaamT17tpk/f77tBIV01BIsVq1aZTZv3hz9J9tq3rx5dFuyTrL+9Kc/mR07dpjp06ebgoICM2HCBHu/zBRKRDroKlWqmLy8vOh9y5cvT+p5W7VqZTvqWOQ4yPGI2LNnj33NkeOTyHXXXWe+//57uy8SfG666abA46Lvv+aaa+zxlMJFckxef/31pI5HMtsWUghJOvN4/+T5Rf369e1rnjlzZvSxclv+DurWrfub7SZaP9XtASh7xAviBfECQDKIF8QL4gWAZBEziBnEDBxoDGpkKOmcZPT2D3/4g9m+fbvt6B5++GFzySWX2OWnn366HTWW5XI7MpopI8GnnXaaueGGG6KjtDIaPnr06CLvi3SQMqIrI+wbN260I8rJqlSpkjn77LPNfffdZ0dXZV/+9re/JfXYq6++2jz11FNm/PjxdqR/3bp1ZsaMGXaZjEI/88wztgOXEfq77rrLHrPDDjss4XanTp1qvv32Wxt0atSoYUeoK1eubJfJrxDkuO7duzfhMZFRYhntluD42GOPmaKS51y9erXZuXNn9L5+/fqZbdu2xf33/PP
PR9e99NJL7d+GHFv5N2LECHPFFVfEfb5E66e6PQBli3hBvCBeAEgG8YJ4QbwAkCxiBjGDmIEDjUGNDPbmm2/aTkVGh48++mhz0kknmdtvv90ukw5dRsK/+OKL6DS/iFGjRkWn+EkHJ53RtGnTirwfEjBkdFpGTGU/hg4dmtLj//73v9uRZ5kud8wxx9iAUrVq1YSPk0D4+OOPm+uvv96OyMrr+fnnn+0yec033nijOfnkk23QnDVrlvnoo4+igSBR5y+j4g0aNLCPldF0CVRi+PDh9pg1bNjQHsN4ZL8+/vhju65M+Yv8KqEoZNrmEUccYQOgPGeyvxqIkCmLRx55pOncubP9J+/RnXfeGV0uI+iRUfRk1k+0HED6IV4QL5JBvABAvCBeJIN4AUAQM4gZySBmoKgqSGGNIj8aKANvvfWWueeee8yCBQvKelcAAGmMeAEASAbxAgCQLGIGkB6YqYG0J4Hixx9/tPn95PZDDz1kR58BAPAjXgAAkkG8AAAki5gBpCcGNZD2JP+i5BuUHIsDBgyweRkl36CIV3RIcuYBAMoX4gUAIBnECwBAsogZQHoi/RQAAAAAAAAAAAgFZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAoVA52RUrVKhwYPcEQNqqWrWq065fv77Tbt26dfR29+7dnWW9evVy2l27dnXa7dq1i7vtKlWqFGOvUVaIF0D5VbNmTafdtm1bp33MMcdEbx911FHOsh49ejjtFi1aBG4bmYGYgbJ00EEHOe1TTz3VaQ8ePNhpd+vWzWk3adLkAO4dAD/iBRBb7dq1o7fbt2/vLDv22GOd9qBBgwLPvxs2bOi0K1bkt/BIX/x1AgAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqagAov/bu3eu0d+/e7bS3bNkSvb127Vpn2apVqwJzNPrzP+o6GnpZ5cp0WQCQzvbs2eO08/Pznfbs2bPj1sioVKmS0963b5/Tzs3Nddq1atUq9v4CKN/Wr1/vtMeMGeO0N27cGHheq3OR+/spXYMOAIADff6tz70XLlzotFu2bOm0dazS11zq1asX91wdKGvM1AAAAAAAAAAAAKHAoAYAAAAAAAAAAAgFBjUAAAAAAAAAAEAokKAeQEL79+932rt27XLaW7dujVtTY/ny5XFzMoqsrCynXa1atejtihUrBuZPJ6cjAKSXwsLCwHz0/ry+umaGvz5TrPpNOha1adMmbn0OhMfAgQPLehcQcps2bXLaS5cuddqbN2+O+1jd7/z8889OOy8vz2n/9NNPTrtXr15O+6STTorePvLII51lOTk5TpvzWABASfCfM+uYuGTJEqddt27duNdfhOd5cc+39bUc/VigtDFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBRIPwUgZTolSEFBQdz0UzVq1HDa1atXd9qVK1eO265QoYKzTLd1uhGm8QNAetHpCleuXBkzdaGOJWLnzp1Oe+/evXGfxz81XpCOKjzuuuuust4FhIxOjbFs2TKn/cEHHzjtCRMmJJ2OStPrzpgxI/C5/X3cihUrnGWHHXaY027fvr3Trl+/ftL7BZRHTZs2ddo6VbFOUwmEJZbp8+VE7SA6raJOP6U/J/razvbt2532tm3borfbtWvnLGvUqFFganF9/QYoaczUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQGbV1Khdu3ZgbjSd2xTAgaVzIe7bty8w93hQLvJU6c+7P89icbcVJFFORmpspIeGDRsGvm/kuwXK1p49ewJrVxQWFh6w5/Z//nWu+gULFpRY3KLGRngMGjSorHcBIZefn++0GzRoEHgeMn78+CLV10jmuSdPnhy9vXz5cmfZ1KlTnfZxxx3ntHv16uW0W7RoEb1dp06dYu0nkAn69evntMnXjzDz17LYtGmTs2z16tVOW9ct1XU
zguh1ly5dGnc/YtXU8O+brofXqVOnuHFL1KpVy2nzmUVJY6YGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumhs5NHJQLjfoaQMnTdQh0TvQdO3YE1rnw50zXeROLy/+ZL059jZJGjY2y0bdvX6dNDSYgsQOZY1bHiw0bNjjtlStXOu1169YdsHgRROfwXbJkSZGPX8WK7u92Wrdu7bSzsrKKtI8A0k92drbT7t+/f9KP9dfXKIkaGwUFBdHbc+bMCexr582b57QPP/xwpz1kyJC4y/RrBsqDM88802nznQJh5r8+s2LFCmfZjBkzAh+rz8937dpVpDiVTK09//r6sfrcXdfDy83Nddr++lDU10BJYKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumRq9evQ7sngAIpHMd6vyF+fn5Tnvt2rVOe/369XHrcxQWFpbYfurcpsWpsZEoR7penqjtr7Ght4WSc/755ye9Lrk0gQNv69atTnvp0qVO+/vvv48bE3QsKc381YlqbPj7D92nV69ePbDdsmVLp12lSpVi7y+A9FC/fv3AGhv+fmzfvn3OskmTJpVojY2gbU2dOtVpL1u2zGnn5eVFb69evdpZ1qNHj8C6QfoYAJlYt0+fk/C9AmHir4uh+3993qrrXOhze//5ur7Wk4iux6HjjX+53g/9WF1TQ++LP1b562sIPr8oCq7qAQAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqavTr1+/A7gmAQDpfoc7Lu3LlSqddo0aNuNvS+YO3bNliDpSSrLGh6RzqlSpVCmz718/KynKWkcOx5Ojc1Rr1TIDSpfvdBQsWBNZs8scXXddC5/AtTXpf/LVBdO7h7OzswHbt2rWddsOGDUtwTwGkE11fwp+Xf926dc4ynddc9zs5OTlOW/c9mzZtSroeh841vmbNGqf9zTffxK0ppGtqnHLKKYG1B/R+c96LMGrWrFlZ7wJwQK6T6Gs3+tqPvz5qrHN7//UdfxwqSv1Uvf6GDRvifmfQbf1YHef8r7lNmzbOMmpsoCi4sgQAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIDMqqnRq1evwDz5fuQ+A0qezqu4cePGwHzBOn+h//E7duwI3Pbu3btNOtTY0H2Jbleu7HZhVapUcdpVq1aNu1w/tlq1agn3Hclp1KhRWe8CgID4oOl44q9VsXbtWmfZzp07nfbevXtNWfHX99B58H/++Wen3bRp08BjovPi65obADJHgwYNorc7d+7sLNM5vvPy8px2p06dAmtb+NefOnVqYM0Mff6t+WtyzJw5M24/Has2iO67e/fu7bTbtWuXdIwAAJQ8/7WNevXqxe2jY10z0bUs/LVEFy9eHBgfEsUezX9dSdeK0s+lr/UEXTfWqLGBomCmBgAAAAAAAAAACAUGNQAAAAAAAAAAQGaln9JTgUg/BZSuwsJCp52dnR2YakmnkPJPWdyyZYuzbPv27YHPpVNZlVY6Kt2X+KdVxkoZVaNGDaedlZUVN72IPl562zo9FQCEle7fcnJyAqe4d+/ePXp79erVzjI97Vy3y4rejwULFjhtnV6lbt26gfGkbdu20ds1a9YswT0FUNb855fNmjVzlnXt2tVpz507N3Bbev2BAwdGb3fp0sVZNm7cOKc9a9asIvenet3Jkyc77eXLlzvtnj17Ou0hQ4ZEbx922GGB6fr0+TUAoGTp89DGjRs77YMPPjjw3N6fNlWnUJ03b57T1uf2/nSuqdLXlZYsWVJi14Vat24deO5esSK/0QczNQAAAAAAAAAAQEgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKSSeN13nZAJQunWOwSpUqTnvv3r2BuRE3bNgQ87auYxGrHoeuuVFar1Pvl86bqOti6Jy/tWrVctr+vOj++hqxjqd+rM5bCQBhpWtEtGjRIm7
e3kWLFjnL1qxZk1JNptKyZ8+ewHzBP//8c2DuYt3H+3Pu67py1NgAMoeuH9G7d2+nPXHiRKedl5fntJcuXRr38ToHeqdOnZz2l19+6bRnzJgRd9uJ6m0kqnek99uf91zXDfHXBdF1lmLV9QMAFI+uJaqvv+bm5jptfS3Dv76uPaHPW2fPnh1Yg6mgoMAcqBob/ms9+vrWvn37Ar9T6Bobul6evp6D8oGZGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKHAoAYAAAAAAAAAAMismhoA0ivPos6jqPPbNmvWzGlv3Lgxbk0NnXdX17LQ+Qx17vIDZf/+/YG529evXx9YU0Pnj/S3dT51nYNRH2+9LWpsAAgr3b/p+OHPWdutWzdn2bJly5y2jie6XVZ27twZmPdeHwPdrly5ctz6Ta1atXLaOp4ACI+srCyn3aFDh8B6Ejr3+HfffRe3psagQYOcZYMHDw6ssaFraowZMyZ6e+rUqc6ylStXBvZ5mj7XnzJlStzXNH/+/MD97tGjR9w+Uec4BwCkLtG1CH2tx38dRK+r41yiunL6nFnXyUiFfqx/27qmhr7mpOOabrdt29Zp5+TkxL0uhMzFTA0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAjU1gJDy5/sWtWvXdtqNGjVy2i1btozezs/Pd5bptq6psWvXrsB8hzof4oGin7egoMBpr1mzJrDuiL+tc6Trmho6t2TFihUDc1Pq5QAQFjq3bpMmTeLml+/SpUtgbSOd71bXQiorej+WLFkSGFP9+Yjr1asXGG/9xytWHmQA4dGiRQunPXDgQKf966+/Om1dj+Lbb7+NWzPjoIMOctq6/kRubq7Tbt++ffT2xIkTnWVfffWV054+fXpgDQ3Nf26v86frx86bNy+wpsapp54avd23b19nWYMGDZw2/SMAlPy5e8OGDeNe50h03SPRdQx/jChOfQ39eB17dN3WHTt2BF770ef2HTt2jHturq/dEIsyB1fhAAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ABCSucB1HkV69at67SbNm0aN1fuxo0bnfbWrVsDc6Tv3bs3bj7D0qqvIXbv3u209etasWJF3PyROrekPn6JclHq/Ou6fgcAhJU/frRt29ZZ1qtXL6e9du1ap61z7ebl5QXWRiorOg+vzuvrr6PRvHlzZ1lOTk7c+huiTp06JbinAEqT/vz27NnTaffv399pv/nmm0570qRJ0dtdu3YNrC+ha2pkZ2c77T59+kRvt2rVylnWrl07p/3555877RkzZjjtRHUzgpbNnDkzcFv+2krr1q0LjBk6puhjAABInb8eqK4Fp+NFotqhQdeddE264tTY0DUy9u3bF3itR1+T0jU3/LWi9PUqfS6vz92psRFezNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBdJPARlCTxvUU+oaNmwYN+2Gnvqn29u2bYs7tU9P79PLDiSd6kq/Lj2N0J8yKlH6qRo1agS2g9JR6dRUABAm/lR9/tSFokuXLk57w4YNgekMdTzR66cLHef80+unT58emH4qKysrcJq/jscAwqNJkyaB6afmzJnjtKdMmRK9PWbMGGdZ586dU0q95D83bdmypbOsVq1aTrt9+/aB6ac+/PBDpz1x4sTo7fz8fJMKnZ7Kn3Jr+fLlzrIePXo47SFDhjjtww47zGnrmKPPvwEAqV0X0mnJ27RpU2LPVZLpqPS1nFWrVgWmsNXXnfxtva5ObZUoppKOKjyYqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQIPE7UA5yoovatWvHzQ+s8xfq3OK6vXPnTqe9e/fuuPkKdT7D0qyxoffbn19SHx+ds9d/vGLlRNfr+7enH1upUqUkXwEApBfd9+Xm5gbW2Fi6dKnTXr9+fdz4oWNPWfLXhhLr1q2L3p49e3ZgDQ0dT3QuY52
7mBobQHjommuHHHKI0z7uuOOc9i+//BK3roVut2rVKqUaG0Hr6rbetr+2nm5PmzYtMEe6rqGh+ZfrdXWNDb3tuXPnOu2BAwc67e7du0dvZ2dnB+4HAOC3dH2IOnXqhKLGhv8ak1i7dm3gdSb/+nv27AlcV1+z0jHTfz2H+hrpjZkaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAVqagAZSuf+q169ety8u82aNQusRVFQUBC43J8XXecr1HkUdd2LA0k/l38/N2zYEPf4xMp5XqtWraRrauh86npb1NgAEFZ169Z12u3atXPahx12WGAM8Oe41fU30qnGhr/2R15enrOscmX39Fn3+Xq5zsnvz9tbtWrVEtlfAKVD13Xo2bOn0+7Ro0f09oQJE5xl48ePd9rdunUL3FZx8njr/ezXr1/cfmj69OnOsq+//tpp//jjj0575cqVTnvr1q1x90PX2JgyZUpgzY358+c77cGDB8c8tvo1pFqTBADKq7DW2NDXmTZu3Bi3ToaulZeopsb+/fudduvWreMeH2pspBdmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBlBP+Og66xkPDhg2d9q5duwLznOtciP7lu3fvdpbpfIZ6W6VZY8O/Lzr/77p16wJrbOic6Hq5Py96onzrWVlZgcsBIF356weJJk2aBOaI1zHB396xY4ezbMWKFYH5b8uKjls6P7Du82vXru20GzRoELcuSaNGjUpwTwGUNn/ebTFw4MDo7dmzZzvLZs2aFVirIjc3N/D8vDjq1asXt92yZUtnWadOnZz2d99957S/+OILp/3DDz/EraGh6e8YuraSfvy8efPi1tQ49dRTnXbfvn0D+17yoANA+tTYKE59jVjXmfLz8+MuS9TWNTX816j066fGRnrhShoAAAAAAAAAAAgFBjUAAAAAAAAAAEAoMKgBAAAAAAAAAABCgZoaQDmUKP+3zvGt84lv27bNafvzou/cudNZtmfPnsD8hTq3bmnRed51Dl+dN17X1NBtf00N/+1Y29LHX9fnAICw0DWadH553ddu2rQpenvDhg3OMp1b158bN53omLh8+XKnPWfOHKet89Xn5OREb9eoUSMwHgNIb/Xr13faPXv2jN7u06ePs+zrr78OrE2h+8+jjz46ertWrVqmtF5D7969A2t96D6tWbNm0dszZsxIqWaGppfPnDkz7rbWr18fWB+vV69eTrtt27ZxXzMAIHGNjQNVX6Mkamz462Lobenn0nVdE7X9qLGRXpipAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFCgpgaA39R8qFu3btxcubHqYPjzi+tc44lqbOzbt89pFxYWmtKg8yT664KIjRs3Ou2KFd0xYF03w18XQ+dI1zUz9GMrVaoU+H4AQFjovO86F3uXLl3i1qLQNTZ0PQ4dX9LF1q1bnfbixYud9rRp05x2dnZ23Pjgz/keq2YJgPTmrzdx/PHHB/YNc+fOddpfffVV3LzdHTp0MKVF137T3wOOPfZYp33QQQfFranx4YcfOu1JkyYF9vup1NvQ21q2bFnc+iZi8ODB0duHH364s6xp06ZOW5/LA0B55a8RoetHpGuNDX2tR8cP/VzFQY2NssVMDQAAAAAAAAAAEAoMagAAAAAAAAAAgFAg/RSA36RW0lOuGzRokHRKEJ2GY9u2bSmlo/JPM9TTBg8k/Vx6vxOln/Ifs9q1awemD0mUjsr/eJ2aCgDCRKcz9Kcp6d27d+A0c52OUE8VT5d0VHo/165d67Rnz54dNwboNC96irpOR5WVlVXs/QVQOn1enz59nGULFixw2q+++qrTnjx5stPu3r179HbDhg2dZfXr1zdlRT+3v61TDur9btSokdP+8ccfA1N06ZQhQct0Oy8vL24M+eWXX5xlAwcOjHvsddpAACiv9HlqcdJR6W3payI6paDu4/U
1qVTo7xykowovZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFKipAeA3dB0HXRNC58fdtWtX3BznusbGjh07AnMh7tu3L25di7KssaFf14YNG+LmSNc5z2vVqhVYs0Tnj/TX79DHnhobAMJE929NmzaN3j700EMDa1ME1W8SK1asiPvYsqRrRy1dujRuH6/79GrVqjltHU9atmzptKtUqVLs/QVwYOj6EUcddZTTnjlzptP+4YcfnPbYsWOjtw855JC0qakRRNee6Nu3r9PWNTdmzJjhtL/++munPXXq1OjtVatWOcsKCgoC90XnX58yZUrcXO3z58932oMHD3baPXr0cNqtWrUKxfsBAOlaY0N/R9CP1f2qrrmka9glignpWGOD+hrFx0wNAAAAAAAAAAAQCgxqAAAAAAAAAACAUGBQAwAAAAAAAAAAhAI1NQAkVLmy21XUrl07bs5gnUtc50DXbb3+nj17orf37t0buG5p0vuia4X4czr662vEqqGhl+uc6P4c6zrPoq6x4c/NDgDpzt+HtW7dOrCGxqZNmwJrGfnz3+bn55t0peOev8aGjgf16tVz2g0aNAjMN5yTk1OCewqgJOl84V26dHHaxx13nNOeO3du3HoTuvZEWGo66D5Nt3WdoE6dOsWtM/LZZ585y77//vvAGhqavwagrqmh86nPmzcvsKbGKaecErd2iO6XyZkOoLxIpcaGvkai40OzZs0CazbpmOk/v9Z9eqr8jy/J+hr6GOjjQ7xIHVfDAAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ACQMl0Dwp//0F8TI1YdjB07dgS2/fludX71ffv2OW39XKVJP3dBQUH09urVq51l1apVC8yxrGuW+Nt6ma6hkZWV5bTJwwggLGrVquW0c3NznfbBBx/stJcvXx63xoaOF7qORTrx12TSed11DQ2db17nE/bX5NDHE0B60Z9fXadBtydMmBC9PW7cOGdZ165dnXbPnj1DWXNN1wLp3bt33NohzZs3d5Y1bdrUaeu6I/786olqbuhlM2fODNzW+vXrnfa6devivoa2bduGov4JAJRmjQ19TUTXbdX1iXTNDV2Xzv9cug5GcWps6McWt8aG53kx62vEqrERllheljhCAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAVqagBImc7t589nqPOBJ6qxofOe+9t6XZ0zfe/evU57//79prT4cyHq2iD5+fmBx6tSpUqBNUr8+SV1PQ69rq65odcHgLCoW7eu027Xrp3T1nnK/TluCwsLA/PdpmuNDZ3Hff78+YG513WuXX+80Hnba9asWYJ7CqCktW7d2mkPHDjQac+dOzd6e9asWc6yqVOnBtYkatSokQkjfY7cpEmT6O1BgwYFxghdU+PDDz902pMmTYpZk6kofbV/W7o+kq5vcsIJJzjtPn36OO1mzZo5bfpuAJnKX/eiRo0agdcxdO1QXUNDx4sgpVljQ18n0vzXrPT1K31eoOuIpPKaywtmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQC6acAFJt/GpyeMt2wYUOnvWvXrsCUIAUFBXGX6cfu27fPaW/bti2lqX8lyf9cej/80yyTSRnln1qpp2XqaZf+1COxpiTq5wKAdKX7s6ZNmzrtQw891Gn7U07pdIQ6HZU/NUiseFJWdIrGtWvXOu2ffvopMF740xvqWNOmTRunTUoTIL3o9HI6bdFhhx0Wvf3ll186y3Rbp6zo27ev065du7bJtOOl2zoFl/4O0rhx47jpuxYvXhyYbkrTy/3tvLw8Z9miRYuc9s8//+y0jz32WKfdo0cPp61T+wJAJtIpu3VaWn1eWxwHMh3V0qVLA69J+a9h6e8r+nuBTi2r415V9d2pPGKmBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBZOsASpSu6VCrVi2
n3ahRI6e9c+dOp+2vo5GopobOQbh//36nvWPHjjKpsaGfR9fY0PkidS5Ef90Mffx0TnSdX13X0NA5lPX7AwDpSvd3Ome8v46GjiVbt24N7IdXrVpVZjWYgui4p/Py6roZ/nZQLImVb548vEB60Z/R4447Lm5dhrlz5zrtMWPGBOYe79Spk8l02dnZgXVFWrVqFb09ffp0Z9nXX3/ttH/88UenvXLlyrg1ABPV25gyZUpgjaeFCxc67SFDhsSttaLjoM6vDgCZQp/z1qlTp8g1NvS29PUYfb7t7+N13b7i1tjwX7PS17P09S7dPuigg5x2Tk5O3HN//ZozFTM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ABwQFWpUsVp161b12k3bdo0bt5AXRMjUY5BnZNw3759geuXFl3rQ+dMX79+fdw6GTqnfKKaGvp463yR/sdTXwNAmOgaQ/786Dq/+Zo1a5z2hg0bAmts6Py36SJRjQ1/n69zDetc67rGkq5xVV5y7wLpSn+Ge/fuHb29YMECZ1leXp7T/u677+LWYRCNGzcud7UY6tWrF7fdokWLwJojP/zwg9P+7LPPnPb3338fWEfDb8+ePYHvna4Bpd/rHj16RG8PGjQo7t9IrNel+30AKI81NvR1D30NRX/H8J9vb9y4MfDcPBH9HcVfV0lfvwqqNxur3bFjx7jX1vR1o0w9z2emBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBmhoADiidu6969eqBOX39eWcT1dDQOQX18t27dzvtvXv3xrxd2nTuRJ1ncd26ddHbNWrUCDx+VatWddqVK1cOrKnhp/MsUmMDQJj4c+nqPLr+HOS6X41VU2PhwoXFypdbWvR++/Pyzpkzx1nWvHlzp92gQQOnnZWV5bTJvQ6kF3/dm6OOOspZNnPmzMCaGmPHjnXahxxySLmrqRFEv35dm8JfsylWf6prAs6YMSNu7aOgehuxluu2v5//5ZdfnGVHHHGE0x4yZIjTPvzww512eX/fAZSPGhv6e4G+RqJraDRs2NBp//TTT9Hb8+bNc5atWrWqWHX5/HWUli9fHnj9Sn8f0deNtqvlBx98cPR2y5YtA2uQZEqNDWZqAAAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBSoqQGgVOkaD7qugz+foc4puHPnzrj5CMWOHTsC1/fXstD5CD3PM2VFv05/Ll2d/7FKlSpOO9Fy3fbXzdDvhc6vHlSPAwDKmr8/y8nJcZZ17tw5MF7o+OCv56Rzout104n/dem6INnZ2YG51HWNpnbt2gXGZwCly38O16lTJ2fZoEGDnLauqeOv8RCrnZubG7evKI90XbkmTZoEHm/dX/qP7wcffOAsGz9+fEo1NjT/+tOmTYtbb0Pk5eUFfsfo379/9Db1NQBkEn+NCF0/onXr1oE1NXQc9F+T0jXn/PU2YtVRSqXGhv6OsXr16sCasbq23lb1/ca//r59+wKPQabU2OCKFQAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBmhoA0iqHrT+Hd+PGjQPzwibKKahrbPgfr3MMbt++vcxqbOjn8u+Lzm2o27qmRtWqVZ12tWrV4uZn1sc+UY0NAEhXNWrUcNotW7YMrJmRqGaTP36sXLnSWbZ3716TLvy1otauXess+/nnnwOPkY4fOga0adMmepv6GkDZ0vm+e/To4bQPPfRQpz1hwgSnPW7cOKd98MEHR2/36tXLWabPD/Hb+hO6HVSjRLd1fROdjz2o5oaOP2vWrHHaY8eOddr6+068+hqCGhsAMoW+ZqJraOhrJPo8119HI9H5s36uJUuWFLnGhv5+smHDhsDlu1TNDf/3Hd3/62tO/vP8MNfYYKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKJB+CkBa8adH8k/7i5WOSqeX0m2dUsqfXkS
nItHTufVUvtLknxqoU2zpaYD+45XMVEr/cp1qRCP9FICw0n1f69atA+OFnt69fv366O2CggJn2aZNm0w60im0li9fHphSJlE6Q3+88KdW0csAlD7dpw0aNMhp//LLL0571qxZTnvKlCnR261atQo830Zi/hRTOq2TPr46/dQHH3zgtMePH590OipNr6u3FYR0VADKC30NJScnJ25K70TpphIpTjoqfY1KfwfZv39/3OtIidKpp5KOKtF1o7KUvnsGAAAAAAAAAADgw6AGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFBTA0Da8ucyFPXq1XPazZo1C8wnrutR+Gts6HobiWps6HZp0bkO9Wvy530X1atXT7qmhs7BWFhY6LRbtmxZxL0GgPSiazTpfPRdu3Z12itXroybo1zHCx1P0oXeL53TV+cI9ufOFQ0aNEi6xlWq+YUBFI+ud9CrVy+nffjhhzvtL774wmmPGTMmbn84YMAAp60//0jtvdFtXaNIf7/R3zkmTZpUpFzsiWps6Odt0aKF0+7Zs6fTpp8HkKl0/+Y/J9a1JorLfz6eap+ubVGP9287UU2NRDVl/ecGOo7p2nplGR+YqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQoKYGgLRVsaI77lqjRo24+b7F7t27nfaOHTuc9tatW+PmGtf1OHTO9IKCAqedKEfhgaLrYOjXsW7dusBjVqlSpbh5FPXx6tevX7H3FwDSgc71qnPDdujQIW4ect3/65yzS5cuDVyeLnS8WL58udOePXt23LpKOvd6lSpVAuMxgNKl6yEcd9xxTnvBggVO+9dff43e/uqrr5xl7dq1c9qdO3cuwT2Fjke6/p2uGZiVlVVi+df9sW3GjBnOsmnTpjntVq1aOe2cnJxiPTcAhLGf1jXnSrLGhq53V5I1Npaq7yf6+pX+vqK/72zatCnueUHTpk0Da2+VZo0NZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFKipASA0/PUgRK1atZx2w4YNA+tkbNu2LW6+Qt3Wj9X1OtIlZ7qu/aFfx5o1a+I+Vr/GjRs3lvDeAUB6qlatmtNu3ry50+7atWvM2JFMe9WqVYG1kNKFv86UWLx4cdz86jqfsM6VS00NoGzpz2ivXr2c9vHHH++08/Lyore///77wMc2adIksCYRTGCfr8+vJ02a5LTff/99pz19+vS4Ne9036u/G+3bty/pGoC6rtJnn30WWNujf//+gX9zAJCJdL8blhobW9RjdY0NfS3IX0NDf5/Jz893lh1yyCGBNZjq1q0bWCu3JDFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKFATQ0AoVW5stuF1a5d22k3btzYaW/fvj16e/PmzYE5B/3rxso5qHPWFhYWmnSga33o3Ij+PL/6NVJTA0B5VbNmTafdunXruLUn1q1b57Q3bNgQWGNDx5t0oePW+vXrnfavv/4avV29evXAOlNHHHHEAdlHAEXTqFGjwM+ov27Dt99+6yz75ptv4tYYEtTU+C3/+faiRYucZVOnTnXan3zyidOePHlyYMzIycmJ3j7ssMMC6wmuXbvWaS9YsCDutvXzfPfdd067RYsWceNirJzqAFAehLXGRkFBQWBtVv19xx/XEtWf1W1dY0OfN/i/V+jjmSpmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBIGNUrVrVadetW9dpN23aNG5OQZ1XVrf1+jqfuH+553mmrOjn1rnd/bVAdE2NdM37DgClzV+jSefG7d69u9NetWqV09Z5Z3W80Hln04WOCcuXL4/erlixYmA9jhtvvPEA7x2A4tSd69ixo9MeOHBg9Pbs2bOdZTNmzAhs5+bmOu3s7GyT6XQ/rmtXfP/999Hbn376qbNs2rRpTjsvL89p63ziPXv2dNq9e/eO3u7
Xr1/ge/HTTz857ddffz1ufQ9/nb1YdZWmTJnitPv06eO0mzdv7rSptQKgPAprjY1dqharjmv+5XrdHTt2BNZx1bVaDzrooLjX5fTxSrXGBjM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoH0UwAyhk6PkZWVFXd6vJ4yrVMv6SlzerlOH+Kflq6n55UlPbXcP1Vwz549cZcBQHnmjycNGjRwlnXo0CFwyrVu6/SFK1asiN7eu3evSVdbt26NmYoqVmpDAOlNp4jq0aNHzNti/PjxTvubb75x2l26dImbHqlSpUomE+h+XKfomjhxotP+6quv4qbr0lq1ahWYbuqYY46Ju7xly5bOsnr16jntnJwcp71gwQKnvWzZsujtNWvWBH5nmD9/vtMeN26c0+7WrZvTJv0UAIQ3HZWn0pj7r3/p+KBT1ubn5zvt1atXx92W6Nq1a9w0ijqFvL7GpzFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKFATQ0AGUvn9a1Vq1b0dsOGDZ1lOpefzvun2zqPoL+mhs7D619W1vy5EgsLC51lug0AMKZ69epOu0WLFnFrT4h169Y57fXr18fNd6vjRTrxxwSdo1fHQADh0rp16+jtQYMGOct++eUXpz1r1iyn/cMPP8StEdGkSRMTBkG1jsS0adOc9meffea0p06d6rRXrlwZvV2tWjVnWb9+/Zz2Kaec4rR79eoVmG89lVoVOj4NGTLEaefl5cWtlZLou4+uFaLb/u9SuoYLAJRXQTU2SrK+hq6xUZz6Golipr6+pb8L6fiha/H5v2Poa1C67pS+bqcxUwMAAAAAAAAAAIQCgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChQE0NAOVGlSpVYuYyFI0aNXLaLVu2dNobN2502jpH4Y4dO6K39+zZE7eORazlAIDw8NdnipX7tUuXLk578eLFTnv16tVxa1Oka3ygBhOQWfx1Gnr37u0sO+KIIwLrSYwZM8Zp+3OCDxgwwFmmz7fLUn5+fty6IPo16uXz588P3HbHjh2jt3v06OEsO/XUU5123759nXaDBg0C86+nQh/vww47LG7tkEWLFgXmQNeWLl3qtL/44ou4dVr031CNGjUS7jsAlAf+Pl732anW2PDUdab9+/fHrK8Rq+5FceiaGmvXrnXau3btSvr7jV5XX2c79thjA/eFmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAADIrJoaOv+Wzt1VUnkgAeBA8fdbOq9fxYruGG/NmjWddnZ2ttPOyclx2hs2bIjbX+rn2rt3b9zchwCA9KbPc+vVqxc3r7jo1q2b0165cmXceKFz0up4AQAlrXnz5oH5qxcuXOi0582b57S//PLL6O127doF1hg6kDZt2uS0ly1b5rRnzJgRvf3RRx85y7799tvAWno673m/fv3i1s3QNTV0TPDXMznQ9PeXXr16RW/37NnTWbZ8+fLAGhu6/f333zvt9u3bx6011bZt25T3HQDK23eKRDU29HX4QlXjbufOndHb27ZtC7wmpetilCRdF0PXZPJfe9P76a89KKipAQAAAAAAAAAAMgKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAJlVU2PJkiVOm5oaANKd7qf87X379jnLduzY4bQLCgoC85pXrux2n1WqVInerlq1auC6lSpVctrU1ACAshV0XptoXd3H6xzmubm5cfPjrlmzxlm2ffv2wFhEvABQ0mrXru20e/fu7bQXLVoUWKtiypQp0duHHXaYs6xp06YHrJ5Efn6+0544caLT/uCDD+LW1ND5vbVDDjnEaev6E8OGDXPaffv2jRsDypK+LuOPPwMHDnSWzZ0712n/+OOPgfFH14CaMGFC3FoqWVlZTrtJkyZJvgIAKD8S1djQNZq2q+8N/tpSus6Uvt61bt26UvuOoWts+McX9Gvw1x5MBjM1AAAAAAAAAABAKDCoAQAAAAA
AAAAAMiv91LRp0w7sngDAAU4f4l+up9ft3r3baW/bts1pr1+/3mnr6Xx79uyJm9qqvKXo09PR9ftSsWLFcntsgExOy5RJr8O/vn6s7uN1ekI9xVrHlxo1asRNyVG9evXAqeL+WAMAB0JOTo7TPuKIIwLTEn377bfR219//bWzTKch0mmcEp33+c+3ddorfzqpWOmmxo8f77R37doVvd24cePA1zhkyBCn3aNHj8C0giWZVutA8u+nfi906jB9vPX5vf94ip9++il6+8svv3SW6eNN+ikASEzHSJ0usmXLlk67ffv2cftwff1KXw/TaaD831/0d51U6e9S/u9K+ruNvu6WCDM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAkFk1NSZMmOC0yXsOIJNqaug8gYWFhYE5Bjdv3hw3R6HOga63rZ870+gcxhrxA0Bp1+9IJT7otl6m+3SdC3br1q1Oe/Xq1U574cKFcWOJ3nam1DQBEB6VK1eOm6NbDBo0yGnPnj07bp0LXX9D16LQ9Ts2btzotCdNmhS9/eGHHzrL9HMtXbrUBPHXjOjfv3/ga+revbvTzs7ONplGvxcnnHBC4PHU14MKCgqctj+ezZw501nWtGnTwOOdrFWrVgXGSL5jAMgkier6Fah+2N8H6lpPOt7q7ytB9PWr4l7P8j9eX2fT19ISYaYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumxvTp05POV0j+XwBlIZW+J1F+Qp0nUNfY0O1du3bFvB0r37p+rkzz9ttvl/UuAChninvuGfT4RHlkdTzYuXNnYM5af/5bXVND55XV2waA0qbrSfTo0cNp9+zZM3p73LhxzrJPP/3UaVevXt1pt2zZ0mkvWrTIaX/00UfR2xMnTgw8n27evLnT7t27t9M+8cQTo7cPP/zwwJoPNWrUMJmuTp06TrtPnz5Oe8mSJYHvjc7lHlSPY/To0U57xIgRpij8NVYE150AZLJEdf12qetOa9asid5eu3Zt4GOrVasWGJ/9265YsWKp1YhNtV9npgYAAAAAAAAAAAgFBjUAAAAAAAAAAEAoMKgBAAAAAAAAAAAyq6aGzqmoa2qQzxBAugvqp/QynScwUR/nX18/VucvzHQ657GOF/4cyDo/I4D0diBzqKarRP1/ovgRVFcpUT2n8uCbb75x2nynANJbfn6+065Zs2bcc97vvvvOaesc37Vr13ba69atc9rLly+Pux9HHXWU0x48eLDTPvLII512hw4d4tYJgTENGzZ02rruyNSpU5326tWrnba/RpSuF6XbRfXvf/876TqvADJLSZ4fFqfvKM3z1ER1YPeqmLtjx46kavrFquPnf6x+rnT+fsLVJAAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAMAAAAAAAAAAIRCBS/JhGDkKwSA8qWo+SKJFwBQvhQnv/CgQYNKdF8AlF2NjWXLlgXWUtC11HSe7sqV3ZKfDRo0iN4+4ogjnGXnnnuu0z766KOddqNGjZx2lSpVQnmuWlb7umHDBqf90UcfOe1nnnnGac+YMaPItaiS1axZs5T+ngCUH/5+JZ36+ANZg2O/6vMKCwvjLktUM1bX5/C3y7JGbKLjx0wNAAAAAAAAAAAQCgxqAAAAAAAAAACAUGBQAwAAAAAAAAAAhIKbtBIAAAAASsk333xT1rsAoJQkqnlQqVIlp52dnR293apVK2eZzvE9Z84cpz137lyn7c+xXpr51tMpt3vQvui85bq9adOmwPon/m0fqBzyq1evPiDbBQCEEzM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQqGCl+TcwHSaNgkAOPCKOnWceAEA5UtxUo0QMwDES2nUoEGD6O2mTZs6y+rUqZNSX5JKX0O/9Fs6/dTKlSud9oYNG5KOCXzHAAAkI1G8YKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoG
aGgCAmMh3CwBIBjU1AADJ4jsGACAZ1NQAAAAAAAAAAAAZgUENAAAAAAAAAAAQCgxqAAAAAAAAAACAzKqpAQAAAAAAAAAAUJaYqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADodW6dWvz/vvvR9sjR440TZs2NbVq1TIzZswo030DAKQXYgYAIBnECwBAMogXQNliUAMZobCw0Nx0003m3XffNdu2bTM9evQo0/2ZPHmy6d69u8nKyjKHHnqo+e677+Ku+8knn5j+/fub+vXrm0aNGpmzzjrLrFixIua6d955p6lQoYITOAEAqcnkmLF3717zv//7v6Zly5amTp065vTTTzfr1q0rpVcCAJklzPHC74UXXrDfIZ588knnfvlO0b59e7u9vn37ml9//fUA7TkAZLYwx4vp06ebXr16mezsbFOvXj1z1FFHmQkTJjjryPeN4cOH2+Xyb/DgwaXwKoBgDGogI6xZs8bs2rXLdO3aNeZyz/PMvn37SmVf8vPzzcknn2xuuOEGs2nTJnP99dfb9ubNm2Ouv2XLFnPHHXeYvLw8s2TJEnsR6uyzz/7NerNmzTIfffSRHfkHABRdJseMxx57zA58fP/992bt2rWmbt265sILLyyV1wIAmSbM8SJi1apVNjbo1zBv3jxzwQUXmCeeeMJue9CgQWbYsGF2cBwAUH7iRatWrcx//vMfs3HjRrv+H/7wB3PSSSeZnTt32uXbt283AwcOtIMk8h1kw4YN5qGHHiqV1wIEYVADZUpOouUE2u+dd94xnTp1sp3+3/72N9OuXTs7YjxkyBCzePHi32xDpvXJ+qJFixZ2/chUwEceecQcccQRdnR67ty55vXXXzeHHHKIqV27tsnNzTV33323fR5/IJKLPzJwIKPP8mvYSEeerNGjR5vmzZubK6+80lSrVs3+v0mTJvb+WM4//3wbMGSKYs2aNc0tt9xifvjhB+cLhQS/K664wjzzzDOmatWqKe0PAGQKYkbimCGPk1+JyTZr1Khh7r//fjNmzBizdOnSlPYLAMKMePH/yMUs2R95rX6yz3KRSi50Va9e3a4jM/smTpyY0n4BQJgRL4xp0KCBHdiQGX2yL5UqVbKzTWRfxKhRo0xOTo6566677H5XrlzZ9OnTJ6V9Ag4EBjVQpuTizKRJk+xob8Rrr71mLrroIvv/xx9/3E6Lll8YHXzwweaUU075za+HZFrfnDlzolPiFi1aFF0mne8rr7xiO+SOHTvazlpGoAsKCsyHH35op2K/+eabdt39+/fb7UsHLcFGRp9HjBhhKlb878dETvgjU+1i/Vu+fLl
d76effrLT+/ykLfcnY/z48aZz5852P/yBtlu3bmbAgAFFOMoAkBmIGYljhuyX/4uRtCPPAwDlBfHiv/7v//7P7tPFF1/8m2V6e1WqVDFdunQhXgAoV4gX/49sQ35Ee9ppp9m40aZNm+j3DRmsGTp0qB3ckVRVn376aTGPPFB8DGqgTDVu3Ngcd9xx5o033rBt+XWQ/KI0EkDk16YyfU9+PSSduQSaKVOmJL39a6+91gYOGWmWzlk64Q4dOtgRaOnUzzvvPDNu3Di77tSpU80vv/xinnvuOZurXAKJ5JaVkW3x8ccf2+l68f7JKLuQYCXBwE/aW7duTbi/MsIvI/UyiBEhvwSQGRoybRwAyjNiRuKYIbM4nnrqKfulRrZ9zz332P2XL04AUF4QL4xNIXLbbbeZ559/Puby4sQfAMgUxIv/R7Yh68jr7tevn5POSgZirr76apveVr5/SF2/hQsXpnCkgZLHoAbKnIwAS6cp3nrrLVuUSDpjGeGW6XoR0pE3a9YsbhHtWCKdesQXX3xhty9T5yTPuJzky+i3WLZsWTRdR3FIShDJee4nbZmmF+Tnn3+2AU4GMI4//vjo/VdddZXNV6injANAeUTMCI4Zf/rTn+wXM/kiIl+Y5MuSPIf8KgwAypPyHi9kQOPyyy+3hcBLYnsAkKnKe7zwk+eW9FfyoymZwRLZnuyzzOCQWX3yf5mt8eWXXxZrP4HiYlADZU4K0klQmDZtWnSan5Dpbf4c4Hv27LFT/uT+ZEWm6UUef8YZZ9jR5ZUrV9pO/Zprromm6ZAcgnK/FHeKRS4eSWce719kqp+kiZo5c6bzWGnHKxgVuTglF6Ek36Iu6Pr111/bnOkS9OSf/DJAgu6tt96a9HEAgExBzAiOGfIrMpkmL1+K5PWfeOKJ9rUcfvjhSR8HAMgE5T1efPXVVzYeRL5DTJ482eZDP/PMM2Nur7Cw0KY7CYo/AJCJynu8iEViwoIFC+xtKRAOpCUPSAOXXXaZN3jwYK9GjRpeQUGBvW/UqFFeixYtvDlz5ni7du3ybrvtNq9Tp05eYWGhXd6qVStv9OjR9vaSJUskCnibNm2KbtO/XMh2K1as6H300Ue2/f3333sNGzb0hg0bZtv79u3zevbs6V166aV2O/I8EydOtM+dio0bN3r16tXz/vWvf3m7d++2/8/Ozvby8/Njrj979myvUaNG3gsvvBBzeV5envNPjolsM972ACDTETPix4xVq1Z5S5cu9fbv3+/Nnz/fO/LII70//elPKe0TAGSK8hwvVq9e7XyHOOKII7z77rvPW79+vV3+66+/ellZWd4nn3xi9+Xee+/12rdvHz0OAFCelOd4Ifsza9Ys+3zbt2/3Hn74YXscFi5caJfL/yVeyHqyj/J/aUeWA2WFQQ2khXHjxtkAcN5550Xvkwsyjz76qNemTRvbIZ9wwgneggULostTDSDiueee85o2berVrl3bO+WUU7wbbrghGkDEypUrvXPOOcdeMKpbt643YMAAb8eOHSm/Hgk8Xbt29apXr+5169bNmzx5cnTZsmXLvJo1a9r/i9/97ndehQoV7H3+f5HlWqzXBQDlCTEjfsyQL0dt27a1X0Ryc3PtlxI5NgBQHpXneKHJcz7xxBPOff/5z3+8gw46yG7vqKOO8n755ZeU9wkAMkF5jhcvv/yy16FDB3tfgwYNvGOOOcYbO3ass71PP/3U69y5s12ne/fu3meffZbyPgElrYL8p6xniwAAAAAAAAAAACRCTQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUQEY7+OCDzccff1zWuwEACAFiBgAgGcQLAEAyiBfAgcOgBjLanDlzzMknn1zi273xxhtNy5YtTZ06dUzz5s3NLbfcYvbs2RNz3eXLl5tatWo5/ypXrmxOPfXU6DrHHHOMqVatmrPOqlWrSny/AQDpHTOSiQkFBQXm/PPPt9t
r3LixefDBB0t8nwEApR8vInbu3GkOOuggU69evcD1EsUD4gUAlC3iBXDgMKiB0PA8z+zbt8+kg+uuu878+uuvtuOfNWuW/feXv/wl5rq5ublm27Zt0X/5+fk24Jx77rnOeo8++qizXrNmzUrp1QBA5glrzEgmJsggicQSGTSfOHGiGTlypHn11VdL4ZUAQOZJp3gRcc8995hWrVolXC9RPCBeAEDJIV4A6YVBDaS11q1bm0ceecQcccQRJisry85weOaZZ0yXLl1MzZo1zUUXXWQ2bdpkzjnnHDui3KNHD3vhyP/4999/394eNWqUOfTQQ+2Ic6NGjezo85NPPlmk/ercubN9/khgq1ixolmwYEFSj5X92b9/vznjjDOK9NwAgPITM7QdO3aYt99+2zz00EN2gLxDhw72S8iLL75YpO0BQHmUrvFCTJs2zXz++efmjjvuKFY8IF4AQPERL4D0xaAG0p50/K+88or9par44IMPzKRJk+wFoS+//NIMGDAgOqosAeL2228PnPongWjlypXmnXfeMbfddptZtGiRXSbblA483j/5pa3fn//8Z5sSRIKR/OpW9iEZEhguuOACU716ded+CSDZ2dk2CDIiDgDlO2bEiwnz5s2zqatk3yPk9k8//VTkYwYA5VE6xou9e/eaK6+80vzjH/8wVatWDdz/RPGAeAEAJYN4AaSnymW9A0Ai1157renYsWO0/Yc//MFe6BESPCpVqmT69u1r28OHDzdXXXVV3G3l5OSY//mf/4nmLJdR85kzZ5p27drZbWzevDnp/frjH/9o//3yyy/mjTfeME2aNEn4mGXLlpmvvvrqN2lHZORfRvoluI0dO9acffbZpnbt2ub0009Pen8AAJkRM4JignyZkl+Fya/EIuRLztatW5PeFwBAesaLxx57zA5m9+/f34wbNy5w3UTxgHgBACWDeAGkJ2ZqIO1JTQo/maIXIRd8dDsyeh6Lf10hHXdxO2pJK9K9e3fzu9/9LuG6L7/8sg08sr7fkUceaerWrWuqVKliBg8ebK6++mo7ag8AKH8xIygmyGwPmSIuv86K2LJlix30AACEN14sXLjQPP/88/ZCVTISxQPiBQCUDOIFkJ4Y1EDak9zjpUGKIUlnHu/fNddcE/exhYWFCfOjSx0NGdS44oor0uY1A0CmyZSYEe81ya/EZLBDUlhFyK+7unbtWoxXAwDlT7rFC0k7snbtWpvLXH7JO2zYMFNQUGBv//DDD7/ZbqJ4QLwAgJJBvADSE+mngP9fv379AkfUI2Sd9957z6YBkV/Szp492+Y+l1/TBhkzZozZsGGDOe+885z7ZXrht99+a6ceVqtWzU4dlFH3kSNHFvs1AQDCFTMSxQT59ZcUIrz77rvNW2+9ZdatW2f+/ve/24KDAIDwxgtJNXjcccdF29999539MZRcWJJ6TFqieEC8AIBwIV4AqeHn4ECKKlSoYN58802b81Cm48mo+EknnWSefPLJ6DpDhw41I0aM+E2B8LPOOste1NK/2L3//vttfvX69eubW2+91Tz++OM2FyMAoHzFjGRiwjPPPGNjSYsWLczRRx9tLr/8cnPxxReXyesDAJQMuagk/XrkX8OGDW0MkduRIrD6O0aieEC8AIDMQ7wA/quC53ne/38bAAAAAAAAAAAgbTFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKHAoAYAAAAAAAAAAAgFBjVQatavX28GDRpk6tSpY4YPH16m+3LLLbeY3/3ud/b28uXLTa1atcyWLVvKdJ8AAP8PMQMAkAziBQAgGcQLILMwqIFS889//tNUqlTJbN682bz33nsmXeTm5ppt27aZunXrBq63dOlSU6FCBbv/xTkG8nw1a9Y0J510klm9enXcdUeNGmWPlwS3yL+//OUvRd4eAIQJMSO1Pv7ll182HTt2tPuVk5NjzjjjDPsFKeK+++4zlStXdmLKO++8U+R9A4B0Qbx
ILV588803ZuDAgXa/6tWrF3Od+++/3zRu3Nhe+Lvgggvs6wCAsCNepBYv9u7dawdfmjVrZvetb9++Ztq0aUXeHlDSGNRAqVmyZIk5+OCDTcWKqf/ZSWcadmPHjjV33HGHDZ7r1q2zXxTkS0KQrl272uAW+Xf77bcXa3sAEBbEjNT6ePnV2eTJk+0vvFasWGHatWtnLrvsMmedk08+2Ykp55xzTim8EgA4sIgXqcULufAk8eHxxx+PO0j+4osvmokTJ9rB8Y0bN5qbbrrpAL4CACgdxIvU4sUzzzxjPvroI/Pdd9+Z/Px8M2TIEHPqqacaz/OKtD2gpDGogVIhU/teffVV8+yzz9pfh8qJ8uuvv246d+5sfyEkI77Tp0+Prn/MMcfYC/gnnHCCPfH+7LPPTEFBgbnhhhtMq1at7K+G+vTpY/Ly8uz6cnFGlskIcaNGjczFF1/sTN2bMGGCHSCQ55Zfr27dujXuaPeYMWNMt27dTO3atW2nfO2119r7DzvsMPv/Fi1a2O288cYbKR0D+YJw4YUXmsMPP9y+pkceecSMHz/eLF68uEjHtKS3BwDpgpiReh8vr1NmaAj5oiFf1hYsWJDScwJA2BAvUo8X8nwXXXSRHfyO5aWXXrKDGB06dLDH8MEHHzRvvfWW2blzZ0r7BQDphHiReryQ+4899lj7emWGy6WXXmpWrVplB7uLsj2gxHlAKbnkkku8m2++2d4eP368V6tWLfv/PXv2eE888YTXsGFDb/PmzXb5gAEDbPuHH37w9u/f7+3YscM7/fTTvcGDB3srV6709u3b502fPt1bv369XX/48OHeeeed523atMnbtm2bd+6553oXXnihXZafn+/VrVvXe/75573CwkLvww8/9KpWrWr3RyxZskSGme1jRdOmTb1XX33V3pZtTZ48OeZ6EbLteP+6du0aXa9bt27eyJEjncc2a9bMe//992Mer5dfftmrXr26PQ6tW7f2rr32Wue5U90eAIQJMSP1Pn7ixIl2O/K8lStX9v75z39Gl917771e7dq1vezsbK99+/benXfe6e3cubNY7xEApAPiRdG+E3zzzTd2W1qdOnW8MWPGRNtyHGX/Zs6cmcK7AgDph3iRWryQfr9nz57ewoUL7TG67777vCOPPLLI2wNKWuWSHyYBEnvttdfsiG7//v1tW/L0Pffcc+aTTz4x559/vr1P/h8ZiZYR8dGjR5tly5bZfH6iR48e0WJP//73v82GDRuieWEfeOABO61Q6lJ8/PHH9jFXX321XXbKKafYNB3xVKlSxSxcuNBut2HDhuaoo44KfC3J5jOUkXudt1ba/hF6Pzk2P//8s2nbtq193VdeeaW55JJLzAcffFCk7QFAWBEzkuvj5Rdmsn15bf/6179Mly5dnF+nXXHFFfa1zZ071x5PeY6nnnoqqf0BgDAgXhT/O4Henux3VlYW3zEAZBTiReJ4IdeiDj30UHPQQQfZmRoya0RmrBR1e0BJI/0UyoTk+27durVzX5s2bez9ETJtL0ICR7Vq1Zz7/FP19u/fbx8vHaj8k2mAknpjzZo1dnqcTJfz020/CVSzZ8+2BVclSL377rumJMj0QP/0QyFtmVIYL4BI8JDXIa/t6aeftsFwx44dRdoeAIQVMSO1Pl7SUF1++eW2hsb27dvtffKlSqaqy+s85JBDzIgRIygUDiDjEC+K/51Ab0/yyMv3D75jAMgkxIvE8eK6666zr1v2f9euXfbHUDIYI+2ibA8oaczUQJmQCyvS8ftJW+6P8Bdvkg5/9+7dNl9hy5YtncdJW9aVjlV+RaTJiLh0xH5S9E7yHMbSs2dPO8ouQen99983Z599thkwYEDcYlLSkccj+z1nzhx7W3Iizpw5M7pMCimtXr3a5lVMRuT5I0WZirs9AAgLYkbqfXxhYaH9UiGPky9YWlEKJAJAuiNeFP87QWR7kkddyG25kCc1NgAgUxAvEseLGTNmmD/+8Y+madOmtn3
WWWfZwuDffvutvc01KZQ1vtGiTMg0PylqNHnyZPvrn7///e+22NCJJ54Yc32Z5jZs2DBzzTXX2E5SOnfpYOUxTZo0MaeddpotyiTT/YSMhsvotjjppJPMypUrzciRI+1zyXTCsWPHxnyePXv22GmImzZtsgEjMpWucuXKdtqf3Ldo0SLnMTLlLt6/SPAQUlRJClFNmTLF/trpzjvvtIFJZmTE8umnn9rXKuTXAjfffLMZMmSILcBUlO0BQFgRMxL38VKoT2KFDHzL64kUeY38Ak1eX6So37x58+z2zjzzzCK8GwCQvogXieOFvEb5xa3sk5Db8s+/PZkhvmDBAjs4fs8999gULDVq1Ejx3QCA9EW8SBwvjjzySFtcXdJgyeuV1yPfNyKDFlyTQlljUANlQjo6CRqSHqNBgwbm7bfftrn5dD4+v1deecWOgPfu3duuJ8Fk586ddpnkKYxM8atTp47p16+fmTZtml2WnZ1t61DIVDlZR/KMX3DBBXGf580337Rpn2TK3I033mjbso9yIn/vvfeaoUOH2u3I/amQaXqPPPKIOeOMM2wwklF8CaIRclvSg0R88803dqqhjPRLMJHAIMEt2e0BQKYgZiSOGfIrqcMPP9z+Ukt+3SW5eOUYVahQwS5/77337BR2GRiXfRo8eLD561//mtI+AUC6I14kjhcTJkywzylxQAYt5LZ/wOKyyy6zF6qOPvpo+4tl2SfqLwHINMSLxPFCvivI65UZGfJ88twyyCHfKZLZHnCgVZBq4Qf8WQAAAAAAAAAAAIqJmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCogWIbN26cqVevXlnvRkYZOnSoefbZZ8t6NwCgRBEvSh7xAkAmIl6UPOIFgExFzCh5xAyEAYMaQAm67777zGmnnVbs7Xz22WfmuuuuS2rdUaNGmUMPPbTIz1VYWGhuuOEGU79+fZOdnW1uvPFGs3fv3iKvn+r2AKA8Il4QLwAgGcQL4gUAJIuYQcwoTxjUQLFIZ4FwH7OHHnrITJo0ycydO9fMmTPHTJw40YwYMaLI66e6PQDlQ7r1fWGQbseMeAGgPPZ9YZBux4x4AaC89n9hkG7HjJiBIvOQsdasWeMNHz7cy8nJ8Vq2bOndeeedXmFhodetWzfvlVdecdYdMmSIN2LECHt769at3vXXX28f07BhQ++iiy7yNm/ebJctWbLEkz+bl156yWvXrp3XqFEj75tvvvHq1q3rjRw50mvRooWXnZ3t3XbbbdFtL1u2zDvuuOPsftSrV8878cQT7XYiLrnkEu+KK67wzjnnHK9WrVpehw4d7DYjNm3a5J111ln2OTp27Og9/fTTdh+S8eabb9rXW7t2bS83N9d7+eWX7f379+/3/vrXv3pt27b16tev7w0ePNhbtGhR9HGtWrXyHn30Ue/www+3+9S/f39v+fLl0cfefvvtXuPGje1227dv73300Ufe6NGjvSpVqniVKlXyatasaf9FXt9ll11m3wtZX/Z/+vTp3tFHH22fW47Lueee623YsCH6/AMGDPCeeOIJezvo+Mp2qlWr5lWsWDH6nHK8UyHbfO+996Ltd9991x6roq6f6vYAlD3iBfEiGcQLAMQL4kUyiBcABDGDmJEMYgaKikGNDDZo0CDv/PPPtwFh6dKlXpcuXbyHH37Ye+yxx2yHHrF69Wrb8UU6SOnozjvvPNtxb9u2zXZuF154oRNATjvtNLt8+/bttoOTDuzWW2/1du7c6c2dO9fLysqKBgF5zKeffmqXbdmyxQYD//NLBysdq6y/d+9e78EHH7QdeIQ899ChQ20QW7VqldenT5+kAsiHH35oO9uvv/7a27dvn7d27Vrb4QoJoM2aNfN++uknu1+///3v7fGRACvk+bt27eotXrzYLpfnl/0UX3zxhe0kV65cadvSYc+bN8/evvfee71hw4Y5+yGPq1Gjhvf555/
b/ZBjNnPmTG/ixInenj17bKDv16+fDaLxAkjQ8ZWg2L17d+c5ZdsSdOL9u/baa+16+fn59lguWLAg+tj58+fb+yInDX6J1k91ewDSA/GCeEG8AJAM4gXxgngBIFnEDGIGMQMHEoMaGWrFihX2QyudU8Qbb7xhR3ClE5aAIeuIxx9/3AYbsW7dOttZSUfg7wBkfencIwFkxowZ0eXSkVWoUMF2jBESIGTUORZ5rIzkSmca6WBlRFzvu4wSy3PKc0+dOtUZZU0mgMhI//333x9zmezfn//852h7165dNohNnjw5GkCee+656PLXX3/dO+SQQ+ztsWPH2pHsL7/80gYAv3gBRN+nyYj6QQcdFDeABB3fWAEkWXLSIMdy/fr10fvkb0Duy8vLS3n9VLcHoOwRL4gXySBeACBeEC+SQbwAIIgZxIxkEDNQHNTUyFArVqww1atXN40bN47e17ZtW3t/06ZNzaBBg8wbb7xh73/11VfNxRdfbG8vXbrU7N+/37Rp08bUq1fP/uvTp4+pWLGiWbNmTXRbubm5zvPVqVPHZGVlRds1a9Y0W7dutbfXr19vzj//fNOyZUu7Xv/+/c3u3bujy0WTJk2cxwpZvmHDBpvvTx4b77njWbZsmWnfvn3c49O6detou1q1aqZZs2b2/nj7FNnfgQMHmvvvv9/cfffdJicnx5x55plmyZIlgfui93nhwoVm2LBh9jnlmFx44YX2tcYTdHyLo1atWvb/W7Zsid4XuV27du2U1091ewDKHvGCeJEM4gUA4gXxIhnECwCCmEHMSAYxA8XBoEaGatGihdm1a5dZu3Zt9D4JDnK/uOiii8xrr71mZs+ebebPn287QSEdtQSLVatWmc2bN0f/ybaaN28e3Zask6w//elPZseOHWb69OmmoKDATJgwwd4vM4USkQ66SpUqJi8vL3rf8uXLk3reVq1a2Y46FjkOcjwi9uzZY19z5Pgkct1115nvv//e7osEn5tuuinwuOj7r7nmGns8pXCRHJPXX389qeORzLaFFEKSzjzeP3l+Ub9+ffuaZ86cGX2s3Ja/g7p16/5mu4nWT3V7AMoe8YJ4QbwAkAziBfGCeAEgWcQMYgYxAwcagxoZSjonGb39wx/+YLZv3247uocffthccskldvnpp59uR41ludyOjGbKSPBpp51mbrjhhugorYyGjx49usj7Ih2kjOjKCPvGjRvtiHKyKlWqZM4++2xz33332dFV2Ze//e1vST326quvNk899ZQZP368Helft26dmTFjhl0mo9DPPPOM7cBlhP6uu+6yx+ywww5LuN2pU6eab7/91gadGjVq2BHqypUr22XyKwQ5rnv37k14TGSUWEa7JTg+9thjpqjkOVevXm127twZva9fv35m27Ztcf89//zz0XUvvfRS+7chx1b+jRgxwlxxxRVxny/R+qluD0DZIl4QL4gXAJJBvCBeEC8AJIuYQcwgZuBAY1Ajg7355pu2U5HR4aOPPtqcdNJJ5vbbb7fLpEOXkfAvvvgiOs0vYtSoUdEpftLBSWc0bdq0Iu+HBAwZnZYRU9mPoUOHpvT4v//973bkWabLHXPMMTagVK1aNeHjJBA+/vjj5vrrr7cjsvJ6fv75Z7tMXvONN95oTj75ZBs0Z82aZT766KNoIEjU+cuoeIMGDexjZTRdApUYPny4PWYNGza0xzAe2a+PP/7YritT/iK/SigKmbZ5xBFH2AAoz5nsrwYiZMrikUceaTp37mz/yXt05513RpfLCHpkFD2Z9RMtB5B+iBfEi2QQLwAQL4gXySBeABDEDGJGMogZKKoKUlijyI8GysBbb71l7rnnHrNgwYKy3hUAQBojXgAAkkG8AAAki5gBpAdmaiDtSaD48ccfbX4/uf3QQw/Z0WcAAPyIFwCAZBAvAADJImYA6YlBDaQ9yb8o+QYlx+KAAQNsXkbJNyjiFR2SnHkAgPKFeAEASAbxAgCQLGIGkJ5IPwUAAAAAAAA
AAEKBmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFROdsUKFSoc2D0BQqJKlSpOu379+tHbrVu3dpb16tXLaffu3dtpd+vWzWm3adPGaderVy96u1KlSsXYa6D0EC+QqsqV3dOROnXqOO1WrVo57UMPPTRuX9qlSxdnWW5urtNu0KCB065du7bTrlatmtPm7xk4sPiMpa/q1asH9pe6P23cuHH0dnZ2trMsJyfHaevl/vPpWG3/ObHeD92uWbOm065Vq5bTrlGjRtw+X5/nV61aNbCtz8/5ewYOHD5fADKZPsfQ50L6emP37t3jXn/U1xrbtm0beB6mv4+HBTM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEApJJ80ixzQONM/zivzYVP8e/c9VWFjoLNu/f3/gY/ft2+e0d+/eHb1dUFDgLFu1apXTXrlyZWAu4qysLKddsWLFuPmCw5rzDgAS9at79uxx2ps3b3baS5cujZv3XceDnTt3Ou1mzZo57YYNGzrtunXrxu2XdZ5TzoUAZLKgc16xZcuWuOfX27dvD+zHdV/rr5kRq+1fP1FNDV2XSdfU8Ld1/Q1/vY1k2jou+Gty6HP1RG3q5wEAUH7t3bs3pfOutWvXOu01a9bErHMW67xKX+PX51L+a5HpLBx7CQAAAAAAAAAAyj0GNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEIh6aT8TZo0cdrkkUZZ1tjQf3+6rXPSBuVr1zl/df51ncdO19zYtWtX3HzB/px2YtGiRU5b5/H15+HV9PPqnHc6py9QVnR+xpKsn4P0od9H3c8mqk8UtC3d727atCnpvnLHjh1Oe+PGjU57/fr1gTU29PlOTk5OUvU2Eu0XAGRabmfNHwf0+bSuO6f7dX1OHNQOqpERq6aGbvvPofVj9fMmWh5Uc0OfD/nrP8Vq6xjir7mhv9skqs8RllzYAAAg8bVGsXXrVqe9bt06p71s2bKY32Fj1dTQ5zP6eqI+v0lXnO0AAAAAAAAAAIBQYFADAAAAAAAAAABkVvqpLl26BE5pDUpHRaoqJCOVlDT6709PyU40fX7btm3R2/n5+YFTuHRKqaD0VFu2bHGWrV69OnBauX4derqZf7/1a9ApUnRalEQpgIADJTc3NzAG+D/rpKIKD90/FRYWBk6P1X2lTgsVlJ5Kb9vfZ8d6rP+5dJ+u++Hly5c77ZYtWwb+/bZo0SJ6u2nTps4yPa1X98N62q5ODwLgt+k0g2JGqnRfodPkBZ1n4bfH3p++Ndbx9S/Xfb7u//Q5sT5v1WkQ/Mt136pTAeqUCkHLE6W90v26/nsNeq5E29b7pdv+9FT6+CRqB6Wy0t+dEqW2IpUVAAClT6f91Kk816u0yv50mzrdlE7FqdNrBqXETOfvsJyhAAAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBSSTozVv3//wOX+/Lfk3URp19TQOd50Pmadi27Tpk3R28uWLQvMLbdixYrAmhv+XO46h7zOcZdqTnp/PmK9TOc1btasmdPOzs6Om2uXOjc4kA4//HCnHfT3Rk2N9OZ/f3S+eZ0v3d+vig0bNgS2/TlBdX+m6efW+UT9fa+ubaT77FWrVgXW2Fi6dKnTbtWqVfR2mzZtAutvJKq54c/FrnPCc+6E8kp/rooTM4Jqk8U6l9q+fXvc2j163ZKMV4nOY4Nqx6V6Dqf3219XRB+fRHVF9Lb0eaxuB9GvI6jmg16u1w2qvxGr7c8brXNIJ6pzkah+h7+tv1Mkymet2/44oZfp501UZ0S/Tv8x08sS1efQ743/7zVRfQ69nO8kSJcaTADKr6AabKmeK5Uk3af5z1tjXW+s7Iu5+rxA95/6vEKfN/jPE/Rj0+l7a/rsCQAAAAAAAAAAQAAGNQAAAAAAAAAAQCgwqAEAAAAAAAAAADKrpsaRRx4ZuJyaGih
N+m9MtxPV1Ni4cWP0duPGjZ1lOl+czgWr+fO165oauq1zyut8fHo//fmc9bb0ujqPsc4L6K+xofPrkc8WJWnw4MFJ57flby+9+fsR3cfouhZr1qxx2kuWLAmsVZGXlxe3Hofu3xLx75veT137Y+vWrYH9sn4d/hocK1euDKwFoNstW7aMW3ND1z3SeU11TnPOrZCpevXqVWLb8udAjnXupPstfy5i/7mhyM/Pd9q65kYqudv1uaSuUaDPy4LqJejHJsopH3SuqeuG6L5XH89E55r+5XpZadbn0MdI95/+5brGQ6J6HEH1OXQ7lfc1Ud0M/Vhdn0N/f0n0XEHb1nm19WN1DRN/O9X6JsWppUJ9jvKnJGswASi/9DmKru/o//6oz/8SXZcrzf3ermps+L/XJqodXLdu3cDzCP+5gI7t+tynLOMv35ABAAAAAAAAAEAoMKgBAAAAAAAAAABCgUENAAAAAAAAAACQWTU1OnXqFLjcn0NL59Miv2XmSJSbsjjvdSp5L/XzJKqpofPybtmyJW4uueLkMffX14iVb0/nLtb5mnUuv1TyHuvXqPMg+/Pv5eTkOMuosYGSdPjhhwd+toPiBdKL/73TfYyuTbF27Vqn3aRJk8A8nv7c2Dovtq5zUZxcpTqfvM6LqvtWvXzz5s0x8+/Hqr+ha260bdvWabdu3Tp6u3nz5oHHq379+k5b5zzXsYrPEsKqX79+JbYt/XkPyjWs8w3rOkCJtq3P8YI+j7oWgD7v0jV29HlagwYN4uY81nmOdb5l3X/691sfH12DKNF5rN62v52oPkeitj7euh10nqHPpzX9OoLO+1Op+aCXl2Z9jkR1MYLW1/U5dLxOVGPDv5963USP1e2gY6LX1dvWx0+/N9SmCr+SrMEEoPxK9L3WX2dNf8fV3wcTnb8cSHsDvufq/Vy+fLnT1tdBg2psJIqvupZZaSKyAwAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqavhzucZCLmeUpUR1XHSuXX8eWZ3/LVF9jlQkqrGhc/4WFBTEzYucau7hoJoa+njo3M06Ry2fb6QiNzc3cDl/T+Gk87TrnOQNGzZ02jpHd1AuzkR5rkuyxkZxam7o/PP+ehuxcq6uXr3aaefl5UVvt2vXzlnWsmVLp61rbjRq1Mhp16tXL25co94GwpwjPajGWqK/ZX3uoz+z+rzM/znS+YF135CoRkRQHQe93/ozqmvo6P7A327atKmzTNfY0PR++/s4fd6p+z99/HRbr++vyVGcehyJ1k9Ub0PHK507Wy8PWpaoPkcQHdsSfedIpT6Hjqmp1uvw16fQ9TYS1a4Iqouht6X/PhPV69CP96+v416itn5u/zHQtWiIk+WvBhOA8kufY/hraOj6E7qWlj4v0NsqzZoamv9cSX9P1d9LdYzUMdRf701/r9exW5+/lGZMZaYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumhs6RBYSJzunmzwGn85QfyPxviWps6Px7/lzHOsevXlfnCw7KLxyUr1pQYwPFofM4IzMlyt+tc4fr84igOhqJagbl5+fHXR6Uk7wo+dP929b553UO1a1btzrtTZs2xa25sWrVqsAc+m3btnXaLVq0SLrmRlC9DUEucaSTNm3aOG19jpLK36d+rP6M6s+G/7OgP/tbtmwJbPvrR8R6fNB+6X5J91s6z7H/867r8ejzWN336vND/34nqpmRqK37PH8fqett6HNeffx0O+i5i1uvw9/WMSDVGnZBubP1totTD6ok63PomJ0onuuYoZ/bX6tC19vQNTT0ch2fdLtu3bpxa3fpv/0mTZoEfp/x167RnzG9X/o1EiczrwYTgPJLn3Pouoj+uKbXTXR+qM9JSpPn6xP1fusalTre6vpu/hjrr68RK4YmOm84kJipAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArklEK55J+Oqqcb66nKB5KeAqa
n2vunresp/npKu27r6bj+dtCyWG093Zt0VAD05173pbrf0Ov7+xmdHkVP202UWsS/3D9dONbUWp2SQ6cWCUqZop9Xp5vRbZ0ixT89ef369c6y1atXO+28vDynnZubG5ieyp++Kig1lU7nEesY+dNu0L/jQNNT2EtS0N+2/vzr9HI6ZahOe6fPy/z9kO4L9DlaKilD9X7r46U/3zrFlj6n8+9bKn1rMimk/OexxUkvFSudg//90esmSnWl10+lX08Uj4JSFqZrKiudrirVVFZBqbD0thKlttIxW7fr1KkTvd2gQQNnWePGjVNK0+hPT6Ufq1Nr+J831n4nSm2J9E9XCKD80rFcny/6Y6z+jrZy5crAuKVjZlBsP5D2q/OGROmo9Ovyf5fX6R0TpaMqzRSOzNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFBTA+Wezu+m60WUZI0N/VyJ2v4cwDpnqM4fnGqdjGSXxRJUY4NcpgBi5RfVfak/z6fODa77t82bNye9PFFtj1q1agXmxdZ58v35RnWOd523PVF+en87qN5GrHz+ifK5+mtq6Hobuh6HzjOuj5G/5oaOiTpHKpDO9Odb50z290vNmjVzlrVu3TrwM6n7Jf85m86frOsu6LzGup7Hpk2b4rb1Y3XOZJ37X/eJ/vUT1XjQy3UtkKAaHKnWpkil5oaOAbqt+/Ggvlv3xYnqhiSq3xFUryNd6nMkisGp8p/768+czi2ulyda3x+DdM5uHbtWrVrltPVnulWrVnHjoq7HkSh/uI6N1NgIfw0mAOWHPjfS5zf+80P9HVb3Q+laU0PTsV6fKwXVe9TfO3U9t/r16wcek6pVq5oDhZkaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAVqagAp1tjQ+VuDHpuoXbFixcC2P5e7zuGrcyjr3MN6eZBENTUS1efwHxN9vPRrAlA+6dyaDRo0iFkPQmzcuNFp6zye/r5R5xbXOTv9zxMrb7bOi6rzp/ufS9e10Dn2dd52vS1//vRU88vr/P36GPj3TecV9+cRj1VzQx9/fx5ynUfWX28jVk5anUcWSCe6Joz/86/zA+vPRaKaGv7PrK53kKgOg66hoT/D/nz++jy0Xr16TjtRHRx/zY1Ua+Toc8ugug6p1oBIpf5Eov4yUX2OoJoaiR6r+3n93vrXT7RfZVWfQ+cP1++rzi2e6DuF/3tBqrU9EvEfI33s9WdQ5wNfu3Zt3M+wXldvWx9P/Tr059Bfr4f6GgCQ3nQ/reuR+ft0/d1H12fU9dr0uZWOJ+liZ4JzUf93y7y8vMCaVYnqjuhjUpJ1eLnaCAAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFAg8TGQgM73pvOHp1JjQ9eX0Lnlgto6L6zO8avz3+oceToXdCo1M3QeZJ1b198OyjEryDMLQPeluu6FzmWva0DoGhv+/lD3V7rP0fnnc3NzA2t/+PN5L1++3Fm2YsUKp52o5oY//3dQvY1YbZ1PXT/enw9c19tYs2aN005Uc6NNmzZJ1duI9d7VqVMn8HgSA1CW9HmZ/+/TX7ciVv0dnXN/69atcT+ziWoS6HM0vS2d79/f19SvXz8wb7GuqaHrDPn7wFRraiSqBZfK9vS5pq5ZEFSDI6g+RDL9p7/ehK49kahehz7/Dlqu1y3N+hx6uf6b87/ORPU69HLd9r9X+r1IVLcvkVTeK30MdM0Nf1sv0++Nfi799xiE7z4AEK7zQX2u5O/H9Xcb/V0yUU2NdLVHxTl9Lur/Pqm/O+p248aNnbY+V9XfB/XxLg5magAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBlGCNDV1PIlFNjcqVKwfmlvPn49O5+XSO9IKCgsAcef5cujrPe6KaGkE1NHSeWZ1ztlGjRk67Vq1agccAQPng7w91LlLdb+i6DjrXvT/
//KZNmwLzZOsc3Pq59XP5+97WrVsH1vZYvHhxYM0Nf/5RXSdJ51YvTs0NnVdcb1vHAJ0X1f+69Gv219uIdbyaNm3qtHWdAn/+/2rVqgXGSOBA83++dc7kJk2aBNYkCKpxoD+fus6APlfSNQp0vn//Z1SfR9WtWzewT9Pnlv7zLl2PozTPyfQ5cqI6c0H0eWmi+hz+tn6sfq8S1ZMIqkdRlvU5dG7soHodibatv2Oksp+J6nMUp+aGfq8SvXf+z3Ci/UpUFycV1NgAgPSmzzn8/bY+zyrOeVesc5R0sUvFRf+5qL72mJeXF1hzMScnx2nr803/8dbng6ni2yMAAAAAAAAAAAgFBjUAAAAAAAAAAEAokPcFKCb/dCl/KqpY6ah0ao1EU+2D0k/paWyrV68OTF3gn1atUyj4U7ckk44qaBp/oin/jRs3TnoqGoDyQachqlevXuCU1rZt28ZNl6TTZOiUHLq/0yk69JRh/3PrVEvNmzcPTIul00/501Ppabs6BZROT6X7dJ3+w59yUPfxOq2GTkOit+0/Rjq26Nek3wudrkofE39Kn/r16zvLdFod/V6QngoH8hxO90P671On9dTT9P2fu6CURLG2lejz7f9M6v3Myspy2tWrV3faev2gKf/6nCwsaXJ036D7jlQkOudNJbVVWaay0m3d7/vbell+fn7g36du+9M+bty4Me6yWDFZx6uSTMuhP2f+bev3Tb9XxUmLpenPmf7MhuVzBgCZSl9b85876XMjnVpXf2/V32d0zEzX9FN7VMz0f0fesGFD0qmLY13z0+fU/nPV4pyzCb4dAgAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBSoqQGUIJ0zVec1zsnJCcyhGpTLL1GOZP1YvS/+XH5BeaCTqbERlE9Y56TV6+q2P7+6qFu3bonl1wNgQpkPvWbNmk67UaNGTjs3N9dpt2rVKm7OT92frVmzxmkvW7YssAaEv6ZGgwYNnGW6rfdT1wLxb1vX1PDX2xDLly932np9/br8fbzOpb5//36nrfOn65jgf7zOh65rfejjqfezXbt2cd87XZNE51/VOWr134U/DuqYB6RKn5PpvzddJ03XP/CfS+l6B7qd6NxIf4b9tYL0Z1CfKyVq+88Xdd+bqC8uD7n/dV+iz691O13rcyQ61/f/TepliWpT6boZ/tzaOnbpOkz6+Oq23m//MdKxLFX+x+s4WFz+z47+HOm/Gb1c19ggngFA6dLnN/5auXXq1EmppoauwaGv4+k4l652+fZT1wXR3/90rNff8fR1UH/dEV1TN9UYyEwNAAAAAACA/6+9O9mu4rrfBuz/soOxjcBg04MB0zl2JpnkLnIHmeYeM8sgo8QrmcQNYIHpO5vedmLyhXyLCetXr0/trUISqKTnGWmvOjo6TdXeVTrrvC8AMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NSA19ixkXl8meVX8+Uya66VkbxoXHPOMws3c/1ynBn1mflbM2oz8/fp06eDca9zo95X5hNmjwiwOeWxnnPl0aNHB+MzZ86Mdk1kln3maGenRvZg1AzQzEjNfNDMn885rHZuHDx4sPl383EtLy8PxpcvXx6Mb9y4MfoaZD56K7M8b5+3zdev17lx69at0ffu448/Ht226DXJzpL6+rb6Np6TUc5ad2xkB0w938mOghznuVCOM6u4/n7mGuf5Xp4v5vb6vHqdGmkrdmzMtZ+jN677XKsfZtG8n9cRdS2sOeSLHmeuKbnv5+1bz2E1HRv5ek7t2Gi9t3mNlutRbu9d4wGwvnJOr/N2r1MjOxZrR+yi/wHW9SLXtY3kaTmvffLkSfP/g7Vba9E4z5nra9TrDu7xTQ0AAAAAAGAWfKgBAAAAAADMgg81AAAAAACAWdCpAa9R5sXt3r17NKu4l8Pby1CuOYGZmZz5672OjcxQr7m0vZzoKR0bmaub+YUy02Fzyjmql2V/4sSJFz/fvXu3OV/
V7olFHRDnz58fzQrPjox33313MO71ANXnkXmr2c+RzzHHBw4cGIwvXrw42seR+fz3799v9o7UOT/n7Bxn9nr2d+T6Ujs2rl+/3uzU6HVuHD58+MXPe/fubb4XuQ9lZrn1g548z8qOnXpM9s51ej0Cmfdf56k8XrPfIPfl7L2o47xt7zjI7TkH6tiYRz9H9lHk/pf7Z14H7NixY/S+cs3NtS47N/K4yn2oPrbclsdJjtezYyNf77re53PM4yTHea6Qr5njCuDVqtcJOWfn/+yy8y+355pZu9E2cqdG6zwg+93yWrP2+S7qSaz/18vXJ88Len1vvqkBAAAAAADMgg81AAAAAACAWfChBgAAAAAAMAs6NWADyfy4mqnaykRe9Lu5vWbR5bbM6c0M9MxMz0y9ms+euby9nN7WOH+3ZvY+98EHHwzG27dvH4xlpMPW6B+q3QqnT59uzmdPnjwZjB89etTMAK0dG9nrkxmg+Tgzg7XOUdnpkLfduXPnYJx/Ozskap5/Zrvmc8oui+whqTmprb6N537++efmuNW5kX+39m0sepzZqXHy5MnRbXWfWPSaZGZ5vv51TbWWsEgew3Wfyv0vM5N75zetcfYA5THW69io49yWucW9fT+Pq3oc6QHY2Op7ne9775rixx9/HN2e+26vPyZv39pf83FO7UaqvRn5OPIYzY6NfM55blHX/+xwyv6dPHfI65fs66j357gCWH91rs2epN41Wvb65RpQ5/y8rtqonkZXXF5Pf//994PxzZs3m+PaJZmvT+96OvmmBgAAAAAAMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NSADaxmqmbWXObGtjo0cvvUzORex0bN2KtZ7IsyajNbN7fXPPbcluPM4a3ZfIvyD+Wiw+aQ2Zr79+9fccZn5s3nfFY7gp5bXl4enWMyMzVzsjOXvI5zHs7nNLVzoz622q+xqFPj4sWLze6Kmnva6ttYSedGa5y/m/0m+bdb+azZqXHq1KnBOLcfOnSo2VFSX9987TPv3NqyNeUxXM85Ml85z1cyr7/XR1bHuS3P0fKYyzlwit6+ndvrcZS9Avl6sXHlPpZ9Eg8ePBjdx3J/zDUj1+jWNUXKnO3eOOfq+rzy7+ZzzOMoj+FWx0auGXks5Pqdt8/nUa/h8rbWH4C1V+fWnJNzDs+e13pdumh7XS/yWqi1Bm4k/441Mtf627dvN69F6/lib03UqQEAAAAAAGwKPtQAAAAAAABmQfwUzETGS2UcVS9Sqm5vbVu0fUocVcZL5dfMe5EL9fdzWy9+Ku87o2LqV9d8XRs253x48ODBwbaTJ082o1jy67K3bt0aHZ8/f775deKMo8oIvLq9NwdlbEbO+flV6Lo9o2/yNTl8+PBgfOPGjdF4qlY01WrjqXpRVfm7jx8/Ho0Sa71vzx07dmwwPnHiRDOeqr5muZbk16TzfRZPtTXVc6ncJ3IfyvOVPJ/J85+6Pbf1ztFqrOdq46hS7tutfV0c1XzjpnKfunr16mB86dKlFz9fuXKlORdndFXO82n79u0vft61a9dg2+7du5trcMY41muMXKsymjIfZ74meb1T4xMzdiPng4yqzGMjb1/X+4y1zOcIwNrKeTbn8DzHy+uu3F7Xsry2mUv81NN4nL3459Z1Wl635jVvxnklZ5MAAAAAAMAs+FADAAAAAACYBR9qAAAAAAAAs6BTA2Yqs4gzjzWz56Z0avTk79dM9dqvsagHI3MDM0e63j5/N/Nr83d7HRt79+5d2K+x6DkB81HzpjOX86OPPhqM79+/38zR/uGHH0bnrMwRz06N/Ns5L9fHmXNQT85R2alR817zvjNnvM6Fizo26rjVt7Hazo1W38aicXYD1Peq5pmvJMs1H3ercyNfn8zKzf0gs2AzH71molt7Nqc8r8p9IPeZKeczuS3luU8eC/W4ym29jox8Xq1x77b5mujYeLXqfpRzcXZoXLt2rbkOXLhw4cXPly9fHmzLfomcq3N
/zvWr9mbkXJxdSAcOHGiuwXUNyTUh1/fs7kp5rlDvO/s4cr3O55iPM/Pa6+3zvrJnpPe4AZgm59Wp11k5rrfP69Bcj/P/YRvVv+OaLdf6vB6s15e9To1PPvmk+bedPQIAAAAAALPgQw0AAAAAAGAWfKgBAAAAAADMgk4N2CIdGy29DOXeuMp85h9//LGZnfvkyZPR7VM7M3o5061sQx0bMF/1eM1jed++fYPxxx9/PKljo2aE5m2Xl5cH4w8//LCZsVpzsmuvwqLxal6D7du3D7ZlBneuD60s2Fbfxmo7N+7cuTPat7EoVzY7Nep78/Tp0+bak/edfzufR82Qz76NHOdrkp1W2Z9QM9Bzf13tfsA8ztHyfc9zkjy/qePcNlU9T8sM5DyH6+VK5/itt95a8W3zNcl5y3nY2sr9pr732QGR82GudefOnRuML126NNpVkbna//3vf5vdKrVDI3uxTp48Odh29uzZZsdGrnW19y/7n3o9V/m4M+e83neuP9lR0luTsyejbs/jpB5zi+5LxwbA6uT5SM7DOWfn9WB28dXtvWuhuXRqPI3rsOydymvo2reVr9fOnTsn/W3f1AAAAAAAAGbBhxoAAAAAAMAs+FADAAAAAACYBZ0asEWy/2rG6mo6M3q3791X5sxmxm/N32tlSq9knFqZ1L2Ojcx+BjamzJfOXM7MNT116lSzU6PmgWdWeI7Pnz/f7FKo3RWZI569FmuZJ9/Lgm3le7f6NlbSuZGdGjV7vfZWLMo47+XM1vXkP//5z+i2RX0cmfWamfI1Az0f15UrV5o57jUD/rkjR46Mvkb5emYub+4n1qLNIXPue11AU0w5V8oOjdoLsGh7zq853rZt28KfF43zNcguhZyXdGysTs6RtdOuZlvnPL2oQyM7NupcnnNp5oHnnJZZ2sePHx+Mz5w58+Lnzz77bLDt9OnTzfUnj6u6f+f+lnNrvl65hmR+eO3c6HU8ZcdGPs7av5XjvG0eJ725xXEEsDrZf5ddRnn9l+d0e/bsGZ3v8xotu8/m4t/xuLNfq66D2eOV5wk9rowAAAAAAIBZ8KEGAAAAAAAwCz7UAAAAAAAAZkGnBmwRNUM181Uz07snc2frfee2zFvOHNnMma3Zu7nt2bNnkx5nK0e6lzmdr0lmJco1h3nIvOnMOc3+g/v374/OWdnD8Pjx48H46tWrzb9VM1RzTsnHmfP0emp1brT6NlbSuXHo0KHRfons1Lh48WKzuyJvXzs38r346aefmrnu+V5m9mtdf7JnJbtU8nHl4z527Nhg/PHHH4/mx2cmfL6emb2bPQXMUy8Hv3Welucvea7UGufv9jo27t6923zcNWc6M6dznOeHeV6Vc0vd1/UC9NWOh0Xn1PW9zDnrwoULzbk557w6R2b3ROaD576c8+Mnn3wyGH/66aej2/J3877zb9fHlnNn9mDkGpJrRm6va0iuN9nPketVdppknngd59yQzzGfVx5nuaYDsLZdhUtLSyvu2MheqbzmyLUm16qNqtctVc83e91QPf4rBwAAAAAAzIIPNQAAAAAAgFkQPwVbUH5lrhdzkLfPiID6FbH82nN+nSy/Bl3jQ/Lr3fl17V4MQmpFTPXiGabGUU39mhywMea7+hXgjAbKOKoHDx4056T8yvDy8vJgXL9inNEqGSvUi27ZKF+xnhpPVV/vgwcPDrblOKOYMhLl8uXLL36+efNm873oxU1lXMiTJ09G3+e6bdHfyiiRfGx1nOvY6dOnB+OMp8o4rxppJlZk88hzijyuWucrGTuU43pu1btt7tt5LGSkaJ2n8nyvF4uTzznPNXft2jV6X/TjpvK9qhFSGS916dKlZrRizlt1Ps15KKM1enFTv/nNbwbjs2fPjv5uL56vFY+Wr1euCRn58ejRoxWvKXnfvfUnzy1y/67xU/kcc27Ic5zWdZhrF4DVyzk746dyHazn8hn/mBG3ufbMJX7qWfxvLc8f67q32lh339QAAAAAAAB
mwYcaAAAAAADALPhQAwAAAAAAmAWdGsDkjo3Mvav5rJn7nlmumTmY45pFnhmCP//886SOjVanRm7rdWxkPm7m8NeM28yJBjaOPD537tzZ7HWoHRvff//9YFvOUZmDmuNz5869+PmDDz5odk/UDO1F23Pefl2mdm7U9SVf+3xNcu3JeXf//v0vfv7222+bGfDZ3/Tw4cNmfnrNPM++jcxHz9/N7fm3ak9L/XlR7n32d+Q6WNeqzOzVsbF51Oz7zNDP42Q1nRq5r+f2zP7Pfb0eZ5nX3+vU6HVs1HHOHVvxvCvfm5yHci7JObF2PmVfUWZ85/l1nn/XuSbnoewF6nVo5Pbao7Hafru6Pbspcn3pzevZWVJfk8w8z+MqX7+8rzzXqK9vXqPlsZDPK9fkep2mHxBg9XLuzHk6r+Hq9Uted+a1Y15DZCfT05l0bOTjfvz48ej/3fK2Pb6pAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALWy+AFJickZ7Z7pmXWzs2Mtc4M717eev197O7o5cpmBm1mSdcezR6nRlTxzUbcWlpabAte0aAjSPnoOx1qHneOQdl7nVmbte80OeuXbv24ufz588Ptu3Zs2cwzqzrVjfFXNeT3nPKuTRfo5qvfvjw4cG2ixcvNvPkb9682cyMr90BmXee+bW5nuR+0OqD6u0zua61snMz0zf3ZWvR5pDvc84V2Q3Q6tSoP69knPt69grVcR77vQ6NKZ0b+Rpkj8Bm7AbodWjkHJa9GNmb8fXXX7/4+dKlS81M75ynci6pc01dM587e/bsS3do5P6ca8Rq3ud8Dr1+rV4PRh3nbXMNyPcy5/W873o9k69B7vs5zmu4uu7mdVbed24HYHq/4O7du0f/b3T06NHm9Ulea2bX3tOZdGrk+WRdJ7N3KtfAHisVAAAAAAAwCz7UAAAAAAAAZsGHGgAAAAAAwCzo1ABWnRNYs3R7Gck5zkz1uj1ve/369WbG4JSOjdqvsZLOjF7OdL39gQMHmjm9+byAjTO/ZaZ0zfM+fvz4YNu9e/cG4/v37zfnoHr75eXlZlfR+++/38zJzjzwOfYlZF53dgPkXJnvza5du0Z7BA4dOjTaZ7IoQz47N+p6k/nyvfc55XpRu1ly3crs9fzdVF+jXJvz9ct9Sl765pDnXTlX5LFRs4tz/8ps5ta5zqJxzXrODqLsP8hxzmGt88f83ZzHcy6Za8dGPVfNuSLnoTxHzg6NL774YjCuvU45/9VOoXwcOfdmp9GpU6cG2z777LNmx0arQyPn/bV8H3OfyeuR7HBqdSNlBnjmgWf/Sb6XmSeex13tWrp9+3azMyPXyRzXdaLXY5NrCgB9eT6THYH1mi+vV3Kcc36vY/bpTDo26uPMNXDqc3A1AwAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALOjWANe3YyIzuzL/tdW7UTNvMI+xlKGfHRubd1szBvG3mQj979mwwnpI7nbfNbMTMIs4c33xewKuTc1Q9Xmtu+KJc0+zYePTo0WguavY0nDt3brSraFEfQuZo1+2bZQ7JNSDnzvoaZHdRdpTkPJzv5ZUrVwbjL7/8cvRxpHzfM9+2JW979+7d5u3zsdTugHwNslcg15rczuaQ+0juF/X8ppfl3+t4yXOhen/ZI5AdEL1OjZyL6zjPNXv9MHPt2Kivd+1VWLSGXLx4cTD++uuvRzs0nrt8+fLoOXG+r7kPZe9F7ZvKzozs2Pjoo48G47179zY7IF7Ve5V/Jx9HPs48Nn744YfRtb92zSz63bwGqfeVt8/zjuxSyR6MVsdGbut1bOQYgOl9tLt3737x8/79+wfbjhw5MhjfvHlzMM71+kmsL3Pp1Gj93y3HPb6pAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALghGBNZV5rJkD38tQruNep0ZmKGd+4XfffTfasZE55g8ePBiM//e//zVzpFs51HnbHGeW+549e0ZzFzdLNj7MVe0
hyJ6LY8eODcaZdZ25pzUnOzNQr1271sw/z3mi1ZeQOdmbRc6H9Tnn2pOvT65F+Xru2LFjNOM8uwEy276Vf75oPWnJ3819KHN5a9Z9r4cln2O+Zr3uEDZfx8bBgwcH23odG7k9s5trDnLtEFpJx0avk63VrTD1XKnODxupX6PVrZDntNkDdOHChcH4m2++aa4xdW7JeSfXkOwoyrWv9mZM7dDYqH0n+TjycebzqPv3jz/+ONiW47wGaR1H+ft5X3ncZHdSvpf1+M81IdeXXCPy9hvlvQKY03nY0tLSijsAc5znVg/j2rOuL3Ps13gZvqkBAAAAAADMgg81AAAAAACAWRA/Bayrt956a/Rrz714gV7cVC9+Ksd3795dGEW1kjiqjATIcf16X36NP8f5VcC8rxohkl8bF0cFr1Y95vJ43Ldv32B8/PjxwfjevXujcSsXL15s3nZ5eXkwzq8nt6KFWrF+m1XOjRmjkdtzzs9okfpe533lupb3nWtXRvZMkWtVK4ImI7V6+0zuz7nderP5Y9vyPc/zkV4cVet8Ju+rnoMt2rdzDuyd07Vk5FuOa3RQHvt5fK+nfI0yWqge71evXh1su3TpUnN8/fr10fvKc9Oc4zJaqRU3leNe3NRcI4xaMW7PHThwYHTf7sVR5e3zuqEeh7kt76sXWVhf/3wv3nnnnWb8VF535bGT2wFoX6PkeVhdS547cuTIYHzjxo3mnP+kxBuLnwIAAAAAANhAfKgBAAAAAADMgg81AAAAAACAWdCpAbxSmZ2bea41c7DXoTG1U2NKtnNm1j58+HDFnRqZX5jZ7b1OjTrOLOLMQJdfC69OZq3v2rVrMD506FBz3qi5p48ePRpsu3XrVnN87ty50e6dzGTNXOyt2JWQc3j2JOUacPv27cH4zp07o1n/uV5kXn+uc62M/pz/s7MgZX56fdzffvtts1Mj95mlpaVm9nruR2wO9fiv/RrP7d69e1LHRo7r7XPbs2fPmh0Pea6UOdGrkX+7PrY8z8rjIrsUVjN/5uv5008/NV+Ta9eujfYwXbhwYbRfZ9Ecl69vfe9zbjh69GizQ+Ps2bOjfVL79+9vvp6vsrPkVXXTZKdRnufnmtEb53tV17NcI/I4q3nqi/aDeh2Rc3yOc03Ijo1c6+rvb4XzDICXUc8rsp8pz93z2vLw4cOj1yt57dn7H9Rm4b9hAAAAAADALPhQAwAAAAAAmAUfagAAAAAAALMwz1BLYNPITojasdHryJjSodHT69jIDNvHjx+vOEc6s90zz7CVSZ050Jn9/N577zXzbYH1k5namUt+7Nix0V6H7Gn44YcfmnNMzVZ/7vz586N53jkv5OPMbp65ql0WOWfn65uv3zfffDMYf/XVV6N59ZlVn5nlmTOeXVH5+lf5uLMzIzP3cy2q3Sw3btxo9iPs27dvdJ/pdQnMNQefaedgmamfc1qek7TG2R+R3TM5zg6NKR0beV+9rpp6XpbnaHmcZN519gy0zrt6HRp57plzTZ2Hvv7668G2S5cuNXuBci7Jeaoe/7UTY1Fnxqefftrs2Dhy5Mho11T+3c0i3/e6rua5eu5juR/kep/nA/VY6HVo5N/K/q66TuR7s9qOjbpObNb3HWC16v+scl7NHsQDBw6MrrfP3bx5c/RcKdcHnRoAAAAAAACvkQ81AAAAAACAWfChBgAAAAAAMAsCcoENmzGYufDr2akxtWMjM21rHm5mKLc6MxZtr9nPmQOd48x+ziz3mom+1q8RbHV5POWclcfniRMnRjsfcpyZ27l9eXl5MP7www9H81h780KON6qc/2ru+HfffTepg+SLL75obq+545l3nq9Xvs85zoz5ugY8fPhwsO3WrVuD8Z07d5r56HVtys6BzOfPnPccZwdHzfnN52At2RodG9m/U+eZRV0WtVMj+zZ6pnRs5LZep0bmSNe+iZxr83wv86yzi6Z1rpr3lY/76tWrK56nar/Gojku54Z8L/P4/uijj17
8fObMmWaHRm6vv5vdK7nP5OPYrGrHRu9coNellJ0adXt2zfR6W/K+6/uRvRf53uXzyHHevt5fdtHoZQL4pbymyLkzz7sOHz48GB86dGi0XyuvMXL92CwdG1vjLAMAAAAAAJg9H2oAAAAAAACz4EMNAAAAAABgFoQbAhtWZnZndmvmgU+5v14eeG7PvPbMqK15zpmF28qc7nVq/Pzzz80sxBxn9nPmoNeMdJnosLYyMzqPv5p7eurUqWZnRuaj14zURd0L586dW5hvvqhj45133mlu3yhzQ85/Dx48GH0NLl++PNj2zTffDMZfffXVYJz59Pl61rk1157Ms/34449Hu1MWvR91DWi9j4vei8xPr30fuS5lN9SlS5eajyvHtYvl7bffbu5DbM2OjTwPq+c7vXOf3JezMye31/OwXldF/m7OJfVYyXO2HOffyseZHRv1Ncs86+zQyHnoyy+/HO3YyDku14x8zpnLffDgwcH45MmTox0aZ8+eHYyPHTvWzPiu+8VW6dCYci6wtLTUPFfvdWzUcd429+28psjtrS6qeo2wqEMj96mcD2o2fK5d+Rro2AD45VyZ83D2Ye3fv7/ZcXW70amRa0uez+R521w46wAAAAAAAGbBhxoAAAAAAMAs+FADAAAAAACYBWGGwJbt2GjpZQLnY6kZhZlHmPmFmXvc6tjIzoxWzu6i29cM/8x+ztdPDjKsrewhqJ0FmYGafRGZEZ/Heu1SeO7atWsLc9gXZb5nTnY+zpwb1kvOfTl/ZWZ8fY7Zm5FZ9MvLy4Pxt99+23y987HUzPjMkz99+vRg/MknnzQ7NfL1r3P89evXmznjmYfeylPP22aWbv6t7NDIzP3WelGz0xc9bjanN998c8XnYXku1OvQyOM/9+enT5+OHr8//fRTs1eg/m4eR73zqjzmcrxv377Rc6l8nNlr0+v6qR0cOR/mc+r1NuS8VOet7HjKOS/Pr3MNyf1iq8trhFxjs8cqz9VbHRtTO1/yuKvHRq+HKbuT8n1vnUv0rikyJx6AX55f59qe5xxHjx4djO/cufPi5/v37zevHXvXGHPhP1gAAAAAAMAs+FADAAAAAACYBd8VB7ZMHFW9ff5ub9z721V+nTsjGDJiIWNmWvFTGbHw5MmT5vb8WmG97xqv8pw4AVhbOU/UY2z//v3NaJCMGsmvEOexXm+f0Ut5rO/atas5d9YooW3btr2xlmo0RsZofPfdd824qYzV+uKLL0a33bhxYzDOv5Vf787opZMnT47GS509e7b53mWUSL7e9TXIeTcjZTK+JveD+rzyd3MtyrXmypUrza+013iqfJz5+mWUFVtDnifU/STPwTISqhev2YrVybk1j+/8W7m9RvLkeVI+rhzneVceN/U1yeM3Y/AybiqPyTon5uPIeTvn+YyQOnPmzGBcI6cyDjHfux07dgzGzg+nySim3nvXOjZ65/35uxlHVeNHcs149OjRYHzr1q1JcVS5LrRisMRPAfxSnt9s3769GWl7KK456jVJXjPkHJ/nWTlP5xqxUfmmBgAAAAAAMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NQAtkzHRmbWtn43x5mHm2pvRnZoZE50yvzbmn/Yy3rOLN1WBnWOMycxM5RlpMPaql0V2bOQmag173xRx0bmot6+fXs0B/vcuXOD8fvvvz8YLy0tjea3Zu711Cz1nMNqxnw+zsuXLw/G33zzzWD81VdfDca1R+POnTvNuTIz4Q8fPtzMm//0009Ht2VWfWbq79y5czDOXpKaWZvZ/5mXnj0j2YtR94N8ztkjkOO6zyzK+69rZmb6Pnv2bDC2XpDzQ+bt5zlYnq/k/lmz//P2mftc59aV9ArUeSmPsbxtnis9fPiwue/X88V8Djdv3mx2/9y9e3f0ebz99tvNc7acl3INyXG9fXY86dB4fV00i96Puh/kNUXuY7mG5LFQ9+c8BvO22QmT+2t2bNTn1evQ+fWvfz0YA/BL2VWU1xj
7Y72oa0J2auT5Tp7P5P+VdGoAAAAAAACsIR9qAAAAAAAAs+BDDQAAAAAAYBZ0agBbsmMjs4h7nRop88SzR6Ml8wrzd2t+YWbS5jjz6jMLOnOn6+17+dUy0mH9ZD56Hm8fffTRYJy5qNmtUI/nPJazKyH7PDKfteZk5+PMrPWUudn5OK9duzbamfHll18Oxrk9Ozdq3nfOoznHHz9+vJkv/9lnnw3Gp0+ffvHz0aNHm+9V9jf18ufr9nztDxw4MBifPHmymYdb94uc0zMLt5eXfvXq1WbOe2sd++1vf/vGy8oM+Hwve+sx85Dva84teSxkB0c97nJfz3OyXsdGHedxknNtnldlBnXOiXV/zb+bv5vj7DioWdo57xw5cqQ5V5w9e7Y5B9a5JnuV8vXj1Wam5z5V36uce3P+zPU/b1/3ybxGyGMh+zty/cljuPbJ5PVK3tfvf//7wRiAX8pz4Jx3d0f3YV0v8jwhu/TyfKfVybSR+zV8UwMAAAAAAJgFH2oAAAAAAACz4EMNAAAAAABgFgRmAlsyk7Bmxi/Kbk6Z15w5tHV7r18j8wszQ7nmN+ffzazDzKzN2+e4dmo8evSomZX7u9/9rvk8gLXLSM3+gn379g3GJ06cGIzv3bs3Os45JXPbL168OBi///77o+Neh0bOMdmhkT0N586de/HzP//5z9Ftz924caM5/9Vc2YMHDzY7M379618PxmfOnGm+vvX+9uzZM9i2ffv2N15Xt0q+73Xezm2ZYd7LS79z585orn72N+Xf+sMf/vDGy8rOl9Yaql9jvvJ8JbP/e+c3Na9/27Ztg215Ttc7N6rbszMjezBy388+j3wsdR/N+877ymMyuyxqVnZmY+cc98knnzQ7NvL36zzfeg68erku1Pfq0KFDzXm8d1zV2+e+nvtrHje9jo2qdx4CQF+vZ257XJO01ou8prh7926za6/O+bk+TOmTXW++qQEAAAAAAMyCDzUAAAAAAIBZ8KEGAAAAAAAwCzo1gC0p8wgzjzlzzbNDozeufvWrXw3G169fb+YX1rzmzF/OnOjMrG3l2+b9ZbdH5tf/8Y9/bN4XsHYyS33Xrl2DcXZGZF56PZ5/+umnwbabN28254lLly6Nzn+Z7Z353JmxmvPIhQsXBuMvv/xytEPj1q1bg3Hmfb/77ruD8dGjR1/8fPbs2cG23/zmN4Nxbs9c2b179w7GS0tLo3nzr7Nb5fjx46PvZb6v+V7lepGvbyvzPPep27dvv7FW/v73v6/ZfbFxZT5/7o95LpTzQZ3jct/OfTm7Klo9aKl3fpf3/eabb674vlLOLdnfc/jw4RX3BJ0+fXp0flx03/W8t/aV8Prl+1Hfq7w+ybk5j42c1+s1Rh6DeY2RHRq53mc3X81Yz8eV1xwvq/U3c03VDQPMTc5pOe71vP6/Mo/n/6Dqtc2ia82dO3eOXpPkeVaOXydnMAAAAAAAwCz4UAMAAAAAANhc8VP5FcLWV/0A5i7nuIyn+vDDD0ejTDIyqhczs3379tGvaD9+/HiwLb8qnn+rF0dVYxPyK9wZG/Oyel9HbK0X1hJe9bE9l68bZ/RSzkE1auTOnTuDbXms1/iJ565duzb6tzJuYvfu3c3j/erVq4Px8vLyYHz58uXR+Sm/Qp0xGxkZVSOlenFTGcWS952v7+uKY+nFkB06dGg0hizf93v37jVjSb7//vvmelLjgPJ9zqig1fjLX/6yZvfFxpWRCRnj1ItDq+Ocw/LcJ+et1jgjd/JxTo2CaMk4vzy+9+/fPxifOHFidE7LuKljx44114iMtptL5NSUNbt329b21fzuau+7p/5+L7KwdX3SO45yDcg1uXcNUq9ZcluuPy8r4zJbr+1c9nGAl42behbb69yb5+o5h2d8ZsZV1XFen+Ra9Dqvr830AAAAAADALPhQAwAAAAAAmAUfagAAAAAAAJurU6NmIC/K7qqZhTLRgbnp5QBm5nLm0Nbfz/6NPXv2NPMMW3nMU+f
TXsdG/VvZlbRt27Y31sKVK1dW3MEk75aNbr0yQqfmtOexnXNQvX329OSxndn1t2/fHr2v7OPYsWNH83Fkr0OOa+Z2ZrkePHhwNE9+UYZ8zZiv3RLPHT58uNmhkdn2+XpPyclfz30o14ClpaXB+MCBA6OvV76vta9p0RqQHU618yDXwPzd1fj73/++4nVvo3bi0JfvXe5TOc55qs6B2ceRv5t/q5VB3evUWI08x8l5J4/nvXv3jh7f9edF55Y57+fzWMtj9lWuhXU85bZT72tql8paPs7UylDPfTnf1/zd1jVJ7kOZv57re3bTtI7ZPEZzvV+r9aL1WvqfFLAR1blp6vVgrgH/bYx7fXjZXZZz/FzOuf03CQAAAAAAmAUfagAAAAAAALPgQw0AAAAAAGBzdWp8/vnn6/tIADawzDPMHNkff/xxNIO+l2OcGcvvvffeijKlF2Uf5riVy79eebd//etfV9ypIe+WjW4t80Rb99XLgG/NOc/dvXv3xc/fffdd87aZk53zSr39999/P9j2q1/9qvm7T548af7t2u+RGfHZg3Hq1KnBOHsz6u/XeXPR65mPK8dT8tHTavPTp3R75Lxd95PsO/nwww8H48xPz/c2O5jqerKenSPffvvtYJzrwlwyfZlmSu9Fb3/cqLJTI8+zcj7NXox6fpivV86tOe/nuehGOd9aTVdFbx6akkU+Ncd8ynhqX0dve13/e3nqed2Q+0G9r9z/cpx9XLn/5nlJtV5rxl/+8peX/t2NcgwAjJnaqfGssX7ktjzPz/UhrwvqetKa718339QAAAAAAABmwYcaAAAAAADALPhQAwAAAAAA2FydGn/+85/X95EAbCC9DO9Wx0ZmnvfGmX+b26fkw2Z+cyvDtpXZuxp/+tOfmo+pPuZW3wash1eZz7+aDOlehmr259Tunnv37g223b9/v9mpkRmrtW+il8uejyvnr7x9zYx/5513BtuyEyLzvfM518eWf/fOnTtvTDElk7aXf76WGfG9fag+73yf873p5fnn9ny910v2m8BW7RHJufjBgwcvfr5+/XrzttnHsVHOp6Z2Eq1mLp6SPb6azoxF910fZ2vbonHvvuv5+dRz9bx9nW+zl6X3uDeCf/zjHyvezzfi4wdYy/OG/02Y5/K2ee2UPbB1nGvJRppffVMDAAAAAACYBR9qAAAAAAAAs+BDDQAAAAAAYHN1avztb39rZqRPyf/dSPlbAGuhzmu9OS7zX/P2Ncc8sw5//vnnwTjzDVeT4b9WPv/888FY3i1b1WqOx6kZ3HWu6M0bvXlk7H5f5jm+/fbbo38r87yzEyI7H7Lfo973avPjW/npq81Hb22fmtuez/Ott94a7cDIbpXsrsjbm49hbfW6kHIOzC6gOh/k/JidRNmJs1FM7dRozZe9uXbK3Lvaeb01Xm3vUj62Os59KP8nk/tBbq9rcPZr5Rqb6/9GuMa4fPnyYOwaA9hKplxz/N/E7tUc1/VmI8z/Y3xTAwAAAAAAmAUfagAAAAAAALPgQw0AAAAAAGAW/u9/Kwwb3LFjx4rvdCPnbQGsh9ZU2spA7+Ugt3LzV5LXvBovm0W7ffv2dblf2Mqm5JL3MlJfpcxvfffdd1/8vGvXrsG2999/fzBeWloajLdt27biLqOpHRutTPO17NDoPe7ee5fPq/aO5FqS3SmZl/7w4cNmvn+ra2Ut5/XV9qHAXGRPUPZi1Pkxr7/z3CrPJedqSt/EaufiKfc1dW6ecl+9+bLVrdRbz3PNzX2uzrd5TfHvf/+7Oc41ZTXz/sv+rvUCYGv5X2e98E0NAAAAAABgFnyoAQAAAAAAbK74KV/1A1gf+VXxVhxVxk2tZ6yMr4YD6znfvf3224NtGa/Sis1Y7/ipVpzX1CiR1vbeffUioGokV8ZzpYwa+de//tXcPoX4KZguI6Ra4zw3nOtxM3WumDJ/9ra3bt87n57yuFcbs9p63HkdkLfN/STHdQ3uRWxlpO1axse6xgB
gJcRPAQAAAAAAm4IPNQAAAAAAgFnwoQYAAAAAADALOjUAWEjeLbCR+oV6+d+t+5pqNX1Fq8kdX83vZh5/vgb5nHp9HVPo1ABgpVxjALASOjUAAAAAAIBNwYcaAAAAAADALPhQAwAAAAAA2FydGgAAAAAAAK+Tb2oAAAAAAACz4EMNAAAAAABgFnyoAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALPtQAAAAAAABmwYcaAAAAAADALPhQAwAAAAAAmAUfagAAAAAAAG/Mwf8H71hTAst2I5QAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "show_design_gallery(dataset, problem, n=8, seed=SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 6 — Explore the dataset interactively\n", + "\n", + "**Drag the sliders** to filter designs by condition range. This is the dataset your generative model will learn from." + ] + }, + { + "cell_type": "code", + "execution_count": 87, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b436e2c3c6084c0da018e6a249e36c25", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HTML(value='

Explore the dataset — drag sliders to filter by condition

')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4943216cde4543099009c1189602749e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(FloatRangeSlider(value=(0.15, 0.4), continuous_update=False, description='volfrac', layout=Layo…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e5454d8cb5624b19900ed1c66c0ad5b0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "interactive_condition_explorer(dataset, problem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 7 — Render a single design\n", + "\n", + "EngiBench problems have a built-in `render()` method that draws the design with physics-aware styling." + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAmkAAAF5CAYAAADET73UAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAXrhJREFUeJzt3Ql4U1X6P/A3SfedUvaygyyCgGyyKyKIDgIzyKosKgqIozLwk4qAqFBARVBZ1BFQBwEXcGTEKusgICqLMOxUlrIVSind1+T+n/f4T0l6btLcpGmT9Pt5nvu0OclNTrZ735zlPTpFURQCAAAAAI+ir+gKAAAAAIAMQRoAAACAB0KQBgAAAOCBEKQBAAAAeCAEaQAAAAAeCEEaAAAAgAdCkAYAAADggRCkAQAAAHggBGkAAAAAHghBGoCXevXVV0mn01VoHR566CEaP348ebuxY8dSgwYNyv1xU1NTKTQ0lDZv3lzujw0Ang9BGoCTVq9eLYIk8xYUFES1a9emfv360bvvvkuZmZnky/bs2UM//vgjvfTSSxVdFY9069Ytevrpp6latWoiELvvvvvo4MGDVrepWrUqPfXUUzRz5swKqycAeC4d1u4EcD5IGzduHL322mvUsGFDKiwspOTkZNq5cydt2bKF6tWrR99++y3dddddbnn8oqIisXFwWBEGDRpEubm59MMPP5AvtKTx+3b+/PkyuT+TyUQ9evSgw4cP07Rp0ygmJoaWLVtGFy9epAMHDlDTpk2Lb3vixAlq2bIlbdu2jXr37l0mjw8AvsGvoisA4O369+9PHTp0KL4cFxdH27dvp7/85S/0yCOPiJNwcHBwmT+un5+f2CrC9evX6bvvvqMVK1ZUyON7uq+++or27t1LX375JQ0ZMkSUDR06lO644w6aPXs2ff7558W3bdGiBbVq1UoE/QjSAMASujsB3IBPttyFdeHCBfrXv/5ldd3JkyfFiTs6Olq0gnGAxy1ulrhVbs6cOaLFhW/D3WLdu3cXLXT2xqRxy9bf//530XITHh4ugsTLly+L2/HtS+6bmJgoWpGioqIoMjJStAzm5OSU+vw4QONWvD59+miu95EjR8RjNmrUSNymZs2a9MQTT4jxWZbMdTx9+jQ99thjon7cdcivK3cAcKvUwIEDKSIiQtzH22+/bbU/t4zx/uvXr6eXX35Z3Ia7Hfk14X0daQ1bvHgx3XnnnaKeNWrUoGeeeYbS0tIcCtL49n/961+Ly7juHKj9+9//pvz8fKvbP/DAA7Rp0ybxvAAAzBCkAbjJ448/Lv7yuC2zY8eO0T333CNa16ZPny4CCw4cuOtw48aNVgEKBzs8jun999+nGTNmiO7TkmOaSuLg57333hMD+hcsWCBa8B5++GGbt+eggcfOxcfHi/+5NYcftzTcSsQBWP369a3KHak3B2xnz54VASHXdfjw4bRu3TpRZ7UgZdiwYSJgmj9/PnXu3JneeOMNETxxYFOnTh3xPJs0aUJTp06lXbt2SfvPnTtXBJU8do4DWH58Di45oLWHAzLuquzWrRstWbJE1HfNmjVizCEHo/YcOnSI7r77btLrrQ+xnTp1EkEwB56W2rdvL8aw8ecDAKAYj0kDAO1WrVrFEYXy22+/2bxNZGSk0q5du+LL999/v9K6dWslLy+vuMxkMildu3ZVmjZtWlzWpk0b5eGHH7b7+LNnzxaPb3bgwAFx+YUXXrC63dixY0U5377kvk888YTVbQcPHqxUrVq11OfevXt3pX379lK5I/XOycmRytauXSvqs2vXLqmOTz/9dHFZUVGREhsbq+h0OmX+/PnF5WlpaUpwcLAyZsyY4rIdO3aI/evUqaNkZGQUl3/xxReifMmSJcVlvF/9+vWLL//000/iNmvWrLGqZ0JCgmp5SaGhodJry7777juxP9+Ppb1794ry9evX271fAKhc0JIG4EZhYWHFszxv3rwpxqqZW69u3LghNu7m49aZM2fOiK5Jxt2
P3KrCZY5KSEgQfydNmmRV/txzz9ncZ8KECVaXebA71ycjI8PuY/FtqlSpIpU7Um/L8Xl5eXniNeDWRabWUsizH80MBoPoHuYWtyeffNLqcZs1ayZa6EoaPXq06Po1467mWrVq2U17wWPJuHuVW+vM7xNv3OLF7+mOHTvIHm6lCwwMlMrNkzxKtuKZX0t+DAAAMwRpAG6UlZVVHCDw+C8OLnhMFY9Pstx4MLl5QD7jGaPc/cUDzVu3bi263Xgslz08/o2713imqSXuCrSFuyLVggVHxl2pdU06Um8OVp9//nkxZosDNn7+5jqnp6eXWkcOnjjY4XF3JcvV6m05k5LxODV+TezN5OQgk+tSvXp16b3i99T8PtnCz6vkuDNzUGq+Xu21rOi8dwDgWTC7E8BNLl26JE705iCJx1UxHjvFLWdqzLft2bMn/fHHH2KQOY9p++c//0nvvPOOmE1p2bLkKm6ZUlPaAHYej6YWEDlSb25J5DFtHMC1bdtWtEzxa/Pggw8Wv0al1dHZejuK68EBGo9BU8PBmj3cUnf16lWp3FzG+fQsmV/LkoEnAFRuCNIA3OSzzz4Tf80BGc9mZP7+/tKsSDU8+5MHq/PGrTccAPHAfFtBGg/i5+Di3LlzVq1H3IJX1po3b05ff/215npzMML5wHhywaxZs4r30dKtq1XJ++ZAjl8Te/nrGjduTFu3bhWTBpxJn8LB508//STeD8vJA7/88guFhISIlkZL/J6Z03EAAJihuxPADXjs2euvvy668UaNGiXKuGXm3nvvpQ8++EC1lSUlJaX4/5LpKLi1iVvZ1LrQzMzBICdNtcQzKMtaly5dRMBVcgxYafU2t4CVbPHi2Zru8umnn1qt/sDpMfj15/x2tnBrn9FoFO9hSZx6hLt07eFxb9euXaMNGzYUl/F4Mx7rNmDAAGm8Gie45e5aTvcBAGCGljQAF33//fci9xmfvPnEzAEap3ngli3Of2a5IsDSpUtF3jAer8VrXnLrGu/z888/i+5RzlDPOAM9B3Q8UJ1bpvbv3y+Ci8mTJ9usB9/2b3/7mwh4OFjiwfj//e9/i9M9lOV4J07rwYl0ubWJlz4yK63enNOMW9YWLlwo0lhwCg3uFjW3JLkD14Nfc27Z49eaXx8OHO2tOdqrVy+RgoNTk/z+++/Ut29f0QLKrXIcaHFKDnOSWjV8Hb/+/JjHjx8vXnGAAz+1FCf8eeHgDWPSAMASgjQAF5m77QICAkRAwAEYBwJ8gracVWgOYjhw4RM15yTjYIpb2Nq1a2fV/cf5vDjA4wCGW6E44OP8YDyOq7RWI07aunbtWpF3jbtVOZkrz3wsy+WjeNA/5zX74osvrII0R+rN2fZ5xikHrNyixgEQB7olx2mVFU5ky5MXOODiFrX7779fBEzc7WgPj6PjYJNbPvk+OCjlRdg5sS53g9rDLYY8e5SfN6/jyrM5O3bsKN5zfi8scYB/9OhRt7YmAoB3wtqdAD6OW4I4COSVD8xdr2WBx1xxqxkHGSVnUHoCXnGAk+paLs3kiV544QWRhJe7PNGSBgCWMCYNwIeoZdHnFhoevM7djGWJc6pxKxh3XYJzuCWVZ8ByayMCNAAoCd2dAD6EAyZukeEWJO6e425E3rhLsm7dumX+eHzf4DxOZcIzYAEA1CBIA/AhXbt2FYPQeVYin/w5ESynv+A1NAEAwLtgTBoAAACAHTxu9M033xQ9FZzChydmDRo0qNRxsVOmTBFL5XFPxiuvvEJjx44lLTAmDQAAAMCO7OxsatOmjZiV7ghOK8SpinjoCU/e4glCnND7hx9+IC3QkgYAAADgIJ7kU1pL2ksvvUTfffedSK9jNnz4cJEIOyEhwdGHQksaAAAAVD75+fmUkZFhtdlb1UULTlBecvk/XhWGy71y4kD3Af+t6CoAAEA5Co2OlMrq3hErlbW7W154vmuTG1aXm13ZKt0mZ9sWqSxpzympLPWovMxXwc1CG7WGkh4ulF/
T8vKdv3VyaC1+mzFCWgFk9uzZYrKVq5KTk0XSb0t8mQNBTpXk6JrAmoM0Xn9u5cqVIhrkSjDOcM6zynhAXLVq1bTeJQAAAIBmOn/n8wvGxcWJgf2WSq6rW9E0BWm//fabaK7j5VS4Ge+OO+4Q5bweHi99Mn/+fDEorkOHDnbvh5sTSzYpmowFpDcEOPMcAAAAoBLS+zkfpHFA5q6gjBuvODayxJd5/WJHW9E0B2m83t6jjz4q1rQrmR2b5x9MmDBB3Ka0PldeQ69kE2PdpmOoXrNxWqoDAAAAlZjO3zOH1nfp0kWs32uJc1hyuRaant3hw4fpxRdfVF2+hMv4Op5q6kgTY3p6utUW26Ts1hQEAACAytGSpndy04KTg3N8Y45xOMUG/5+UlFQc14wePbr49txodfbsWfq///s/sb7xsmXL6IsvvhBxkhZ+Wpvvfv31V2revLnq9XxdyYFyjjYxoqsTAAAAPNH+/ftFzjMz81i2MWPG0OrVq0WCW3PAxho2bChScHBQtmTJEoqNjRXr9PKQMbcFaVOnThVrAHLG3fvvv784ION+1m3bttFHH31Eb731lqYKAACA7wuOCJPKajWsJZU1bxEtlbWuly2VNcw6YnVZOfk/6TYZSdZjgljuzVypzJhrtFFr8OWJA1rce++9YliXLRyoqe1z6NAhcoWmIO3ZZ5+lmJgYeuedd0TTndH45wfbYDBQ+/btRSWHDh3qUoUAAAAA3D1xwBtoTsExbNgwsRUWFop0HIwDN39/f3fUDwAAAKBCW9IqitPJbDkoq1VLbqoGAAAAKA96tKQBAAAAeB6dwbeDNM9MMAIAAABQyaElDQAAylRQWKhUVqtRbamsRSt5GcEOd8gLXLcu2i+VBR7da3U59X+npdvcPJcqleVcke/fmGuSysA76H28JQ1BGgAAAHglnR5BGgAAAIDH0Rl8e9QWgjQAAADwSnp0dwIAAAB4Hp2Pd3f6djshAAAAgJdCSxoAAJTpTM6aKjM572zz51rPlu5pkSeVtcvfI5UZftsplV377ajV5eT/XZVuk54or/lZlFEklYH30qO7EwAAAMDz6BCkWTtx4gTt27ePunTpQs2bN6eTJ0/SkiVLKD8/nx577DHq3bt3qffBt+XNkslYQHpDgNbqAAAAQCWl0/v2qC1Nzy4hIYHatm1LU6dOpXbt2onLPXv2pMTERLpw4QL17duXtm/fXur9xMfHU2RkpNV2KXGNK88DAAAAKuHEAZ2Tm88Faa+99hpNmzaNUlNTadWqVTRy5EgaP348bdmyhbZt2yaumz9/fqn3ExcXR+np6VZbbJNRrjwPAAAAqIRj0vRObj4XpB07dozGjh0r/h86dChlZmbSkCFDiq8fNWoUHTlypNT7CQwMpIiICKsNXZ0AAAAALoxJ0+n+jD71ej0FBQWJrkqz8PBw0SoGAAC+xz84SCqrVre6VNbyLsdmct6du0sq0/0iD5m5vFv+8Z/8v2tWlzNP5dioNfgynZd0W5ZLS1qDBg3ozJkzxZd//vlnqlevXvHlpKQkqlWrVtnWEAAAAMDGxAFnN59rSZs4cSIZjcbiy61atbK6/vvvv3dodicAAACAq3Q+3pKmKUibMGGC3evnzZvnan0AAAAAHOItEwCchWS2AAAA4JV0Pt6S5h2dsgAAAACVDFrSAABAojawOjKmilTWsJk8k/PuZrfHLpu1Kdjn2EzOXYelsisHk6WynPPybFGofHReMgHAWQjSAAAAwCvpfLy7E0EaAAAAeCUdgjQAAAAAz6NDkAYAAADgeXQ+PibNt58dAAAAgJdCSxoAAEiCwkKkshr1YqSyFk0DpbK2QYeksoB9P0lll/ce9aqZnIZguV3DEGyQykxFilSmFJocvJ1cBrYhmS0AAACAB9JhTBoAAACA59FhTNptBw8epHPnzhVf/uyzz6hbt25Ut25d6t69O61bt86h+8nPz6eMjAyrzWQs0F57AAAAqNQtaTonN58L0sa
NG0d//PGH+P+f//wnPfPMM9ShQweaMWMGdezYkcaPH08rV64s9X7i4+MpMjLSaruUuMb5ZwEAAACVjs7HgzRN3Z1nzpyhpk2biv+XLVtGS5YsEYGZGQdqc+fOpSeeeMLu/cTFxdGUKVOsyh4c/ou2mgMAAAD4ME1BWkhICN24cYPq169Ply9fpk6dOlld37lzZ6vuUFsCAwPFZklvCNBSFQAAAKjkdD4+Jk1TkNa/f39avny56Ors1asXffXVV9SmTZvi67/44gtq0qSJO+oJAABuovczOLSYer0GkVJZixppUlnUuQNS2Y39crqNa0fdm25D5y93aen95DK/CPlU6B8uvyb+of4OpYAwFsgLzOenFUplBTflMiNScGii85Juy3IJ0hYsWCAmCnCAxmPR3n77bdq5cye1aNGCTp06Rfv27aONGze6r7YAAAAAlaQlTdOzq127Nh06dIi6dOlCCQkJpCgK/frrr/Tjjz9SbGws7dmzhx566CH31RYAAADATKdzfvPFPGlRUVE0f/58sQEAAABUFJ2Pd3f6djshAAAAgJfCigMAAADglXQ+PiYNQRoAQCUXGBIslVWtFSWVNa0vdy01KjgmlRUcl2dy3jgtz+TMOJND7lz8PLiOvPh7UJRcFhgup4AKipRfE0OAfMpUTPLC6fmZ+VJZpiFLKjPmYtF1V+l8vLsTQRoAAAB4JR1a0gAAAAA8jw4taQAAAACeR+fjQZpvtxMCAAAAeCm0pAEAAIB30vt2WxOCNACASr5OZ3hVeU3O2PoRUlmj6HSpLCRRZU3Ok+elsvSLmWU6a7HkbM6QekHSbcJqhMpl1cOkspCq4VJZQKS8r94gv3bG/AKpLPuavJ5pQbZ8u7xgeRZoofwygR06L1k5oNyCtKtXr4pF1nfv3i3+1+v11KhRIxo0aBCNHTuWDCofYgAAAICypvPxljRNz27//v1iMfXNmzdTYWEhnTlzhtq3b0+hoaE0depU6tmzJ2Vmlv4zID8/nzIyMqw2k1H+lQEAAABgb+KAs5szli5dSg0aNKCgoCDq3LmzWL/cnsWLF1OzZs0oODiY6tatSy+++CLl5eW5J0h74YUXxANwsPbTTz/R6tWr6fTp07Ru3To6e/Ys5eTk0CuvvFLq/cTHx1NkZKTVdilxjZaqAAAAQGWn1zu/abR+/XqaMmUKzZ49mw4ePEht2rShfv360fXr11Vv//nnn9P06dPF7U+cOEEff/yxuI+XX37Z8aenpYJcqccff7z48siRI0XZtWvXqEqVKrRw4UL66quvSr2fuLg4Sk9Pt9pim4zSUhUAAACAcrNo0SIaP348jRs3jlq2bEkrVqygkJAQWrlypert9+7dS926dROxEre+9e3bl0aMGFFq65vTQVr16tXFODQzDs6KioooIuLPAaZNmzalmzdvlno/gYGBYh/LTW+Ql+UAAAAAcEd3p9rQKy5TU1BQQAcOHKA+ffoUl/GYfL78888/q+7TtWtXsY85KOMeRx4u9tBDD7ln4gBPDpgwYQK9+eabItB6/fXXqVevXqKvlZ06dYrq1Kmj5S4BAKAcBQTJsyCja8jrdDauK08Ca1h4QiorOHNaKks7nyqVZV9wfByOIwzBhlLX3wyJDpHKQqvLM1mDVGa3+qmsZ6oqI8vpwexYp9N1Op3zEwd46NWcOXOsyrhr8tVXX5Vue+PGDTIajVSjRg2rcr588uRJ1fvnFjTer3v37qQoimjU4hjKbd2db7zxhmjiGzBgAN1///0i4rRs5uOpsPykAQAAANxOr3N6Uxt6xWVlZefOnTRv3jxatmyZGBq2YcMG+u6770QDl1ta0sLCwsSgN56ZwBEhX7bE/a0AAAAAnp6CIzAwUGyOiImJESnGeJiXJb5cs2ZN1X1mzpwpxvE/9dRT4nLr1q0pOzubnn76aZoxY4boLi2NU8+Op56WDNAAAAAAfDEFR0BAgEg5tm3btuIyk8kkLnfp0kV1H854UTIQM+e
S5e5PR2DFAQAAAIBScPqNMWPGUIcOHahTp04iBxq3jPFsTzZ69GgxLt887IuHhvGM0Hbt2omcaomJiaJ1jcsdTfyPIA0AAAC8k678VhwYNmwYpaSk0KxZsyg5OZnatm1LCQkJxZMJkpKSrFrOOG8sj9Xnv5cvX6Zq1aqJAG3u3LkOP6ZOcbTNzc26D/hvRVcBAMDnx+tE1YyRytp0aSyV9e8krwLT/uJ6qSzl35ulsrM7/pDKshJzyVk6f7lrKiDa3+pyeF15JmdkrDxrM6yGXOYfKs94NQTKs0VNRqNUVpCRI5VlXpHX7ky7cEsqyzgtzww15prI2zxceKrCHjtj0QtO7xsxZTF5OrSkAQAAgHfS+/banQjSAAAAwCvpdM6twektfDsEBQAAAPBSaEkDAAAA76T37bYmp5/dpUuXKCtLHvRYWFhIu3btcrVeAAAAAB6RJ81rWtJ4gfWBAweKRUO5L5jXpuIlD8zJbXmB9fvuu0+scQUAABVHr5KLKSRcngVZs7r1TElWJ+C8VGZMOufQOp25l9UXqS6rdTr/LLNuY1BMcqKCvHR5RqmpSD43GQLkU6HeT35MY0GRVFaQLc+CzUmVZ3zmJOf5xEzOypyCoyJofnbTp08XeUB++eUXkR/k+PHjIihLS7s95dhDsnoAAACAL9M7v3anT7akbd26lTZu3Cgy7rI9e/bQo48+Sr179y5eLsHXZ1sAAABAxdOhJc0arxJfpUqV4su8OCmv7N6gQQPRonb9+vVS7yM/P58yMjKsNpNRbjIGAAAAqKw0B2mNGjWiI0eOWJX5+fnRl19+Ka77y1/+Uup98LpWkZGRVtulxDVaqwIAAACVmd63uzs1B2n9+/enDz/8UCo3B2q8llVpY9Li4uJEi5zlFttklNaqAAAAQCVf+kzn5OaTY9J4YdCcnBz1O/Pzo6+//losJGoPd5HyZklvkNdJAwAAALDJx8fAaw7SOBCLiIiwm6Jjzpw5tHLlSlfrBgAALvBTSS0RFimn4KgqrzlOUZmXpLK8q/KY46zr2U6nllBbOD24jvUPeBZeW65zcFRwqSkz1NJtFOYWSmW5t+T0GIpJfg5FeSoLrGfL95efIpcVZsrpO6AM6L2jRcxZZf7sOE/aJ598UtZ3CwAAACC3pDm7+WJL2rfffmv3+rNnz7pSHwAAAABwJkgbNGiQyINmb3IA8qQBAACAu+nQ3WmtVq1aIi+ayWRS3Q4ePOiemgIAAABY4mS2zm5eQHMt27dvL9bttKW0VjYAAACAMqH37Txpmrs7p02bRtnZ8mwesyZNmtCOHTtcrRcAALjI4C8vnB5RxXpWJKsWLq/4Eph6VSpLvX57jWaz/HTnV4tRm8lZo1WMVBbTvI68b81qVpd1Ks/VmCMvsF5wK0Muy5TTShVmyzM+c1KzpLKMK5kOze5UCtF44Q46L2kRK7cgrUePHnavDw0NpV69erlSJwAAAIDSeUmLmLN8OwQFAAAAqCwtaQAAAAAeQefbbU0I0gAAAMA76Xy7u9PlII1ncu7cuZMSExNFeo5+/fqRv8oATgAAAIAypUdLmpWHHnqI1q5dS5GRkWIJKL7866+/UkxMDKWmptIdd9xBu3btomrVrGfeAABA+Sb1DAwJksqiouQZldFB8oxH3Y1kqSwnNdPpdTr9IuTTTXRjedHQWh3ukMqCO3WWyjJrt7C6nO8fKt0mqECub1CuPEM1NEd+/vo0eZ3SgvPnpbLUI4lSmcl41aG1OzHjswzofDtI0/zsEhISKD8/X/z/yiuvUGZmJv3xxx90/fp1unDhgpjdOWvWLHfUFQAAAOA25Emzbfv27bRw4UJq2LChuBwbG0sLFiyg8ePH292PgzxzoGdmMhaQ3hDgSnUAAAAAfIZT7YTmtTnT0tKocePGUjLbK1eu2N0/Pj5edJdabpcS1zhTFQAAAKisdFgWSjJ27Fj661//SoWFhXTu3Dmr65KTkykqKsru/nFxcZSenm61xTY
Z5UxVAAAAoLLS6ZzffLG7c8yYMcX/Dxw4kHJyrJfT+Prrr6lt27Z27yMwMFBsltDVCQAAAJrovaNFrNyCtFWrVtm9fvbs2WQwGFypEwAAaKRXOe6GhIdIZVWj5cN+jE6eyViUkiKV5aTK6zYXZcizFtWExsqzSqs2rSmVBbdrJ5Wda/SAVHbsRi2ry2mZ8snaT+VUFBYsz0aNjJKfQ+06N6Wy+rG/S2VV5YdQXeMz+6LKOqI35TU+QSOdd7SIOavMQ1BOyzFp0qSyvlsAAAAAaxiTpj1I++STT8r6bgEAAAAqFc3dnd9++63d68+ePetKfQAAAAAcgzFp1gYNGiRScPByUKWl6AAAAABwG51vxxuaQ1Ben3PDhg1kMplUt4MHD7qnpgAAAACVaEya5pa09u3b04EDB0T6DTWltbIBAEDZ8wuQD+dhkfLszugIed+ILHmtyfwb8uzGnJt5Dq3daQiWT4AhMXJdIhrWlsoy67eRyn5Plm/36yHrmaYpV1XW31RZ+icwWE73FBElr3HatLE885Qay+mlmjQ4LZWFVJXX8zQE35DvD1yn8+2WNM1B2rRp0yg7W56GbbniwI4dO1ytFwAAAIB9GJNmrUePHnav5wXWe/Xq5UqdAAAAACo9lxZYBwAAAKgoCro7AQAAADyQzre7O516dv/5z39o1qxZtGfPHnF5+/bt9NBDD9GDDz5IH374YVnXEQAAAECG2Z3WPvjgA5o8eTK1adOGlixZQkuXLhXLQA0bNkys2fnCCy9Qbm4uPf/88+6pMQAAOLR2Z1iEPGsxOkxeLzIgXV6782bKLaksP73AoboYglXWEY2WZ3cG1LJef5MlBdaXys5flmeQXjidbF3fK/JaozqVQeVqMz6DwuS6ETWUSprUqiKVNY6UV+/0C/JXuT9wBwXdndbeffddWrZsGY0fP17M4uQWtLfffrt4vc577rmHFi5ciCANAAAA3EvnHS1iztL87M6dO0f9+vUT/993331kNBqpZ8+exdffe++9dOHCBbv3kZ+fTxkZGVabyejYLzQAAACAykBzkFa1atXiIOzKlStUVFRESUlJxdfzddHR0XbvIz4+niIjI622S4lrnKk/AAAAVFbc3ens5ovdnbzSwJNPPkljxowRi62PHj2a/vGPf5BerxerDXCy2759+9q9j7i4OJoyZYpV2YPDf9FeewAAAKi89L7d3ak5SFuwYAEVFBTQunXrqGvXrvTee++JcWocvBUWFopEttxSZk9gYKDYLOkN8lIdAAAAALZg4oDKigIl02xMnTpVzPjkIC08PLws6wcAAABQKScOlFky26CgILFdvHiRZs+eTStXriyruwYAgFIEhgRLZeGR1j0WrGpwjlSmPy+n4MhPz3ZoMXU1agusB0XK9aNIOaXFrQL5h37aTXliWW6G9fMoynd+8plO5USflyOnKik0BlS6IMHTKT7++pf5s7t58yZ98sknZX23AAAAAJWK5pY0nixgz9mzZ12pDwAAAIBjynlMGifwf/PNNyk5OVkk9edx+Z06dbJ5+1u3btGMGTNow4YNohGrfv36tHjxYpFj1i1B2qBBg8QsTkVRbN6GrwcAAADwle7O9evXi8wUK1asoM6dO4tgi/PGnjp1iqpXry7dnidZPvDAA+K6r776iurUqSPSlEVFRTn8mJqfXa1atUREaDKZVLeDBw9qvUsAAAAAj86TtmjRIrHa0rhx46hly5YiWAsJCbE5Bp/LufXsm2++oW7dulGDBg1EBgxugXNbkNa+fXs6cOCAzetLa2UDAAAAqOgF1vNVVj/iMjXcKsaxT58+fYrLOD8sX/75559tDg/r0qULPfvss1SjRg1q1aoVzZs3T6zU5CjN3Z2crDY7W571Y9akSROxpicAALiHwV9ewDs4TJ49GV1Fvl20/02pzJgiz+7MvuH87E41ej950XXyl2dLmlR+4/v5y+0JgSHWi8eblEjpNorKnSkm+TmERIRJZeGR8uL
0IQHyjE9dQZ5UZipy/CQMFZcnLT4+nubMmWNVxtkpXn31Vem2N27cEMEVB1uW+PLJkydtjtHfvn07jRo1ijZv3kyJiYlinXNOV8aP45YgrUePHqXmUePmPAAAAABPFaey+lHJRPuu4CFgPB6Nc8saDAbRE3n58mUx8cBtQRoAAACAR9A5P3FAbfUjW2JiYkSgde3aNatyvlyzZk2bY/j9/f3FfmYtWrQQM0O5+zQgoPSVlnw7CxwAAAD4LIV0Tm9acEDFLWHbtm2zainjyzzuTA1PFuAuTr6d2enTp0Xw5kiAVuZBWlpaGn366adleZcAAAAANlNwOLtpxV2jH330kUjYf+LECZo4caIYo8+zPdno0aNFF6oZX8+zO59//nkRnH333Xdi4gBPJHBUmXZ3JiUlicpyRQEAAADcSld+HYLDhg2jlJQUmjVrluiybNu2LSUkJBRPJuAYiGd8mtWtW5d++OEHevHFF+muu+4SedI4YHvppZfcE6Tx9FR7MjMztdwdAACU0UzJkHCV2Z2RcpdOVE6yVFZwPVUqy03LlcqKMoo01tRi3zyVtTUz0qSiao1uSWVNG9WRynS6+laXc1TW2nQ0G1RoqDwLtmkjeaxSraAkeee0FKmoIFt+rqYipKbytNmdzpg8ebLY1OzcuVMq467Qffv2kbM0BWmcJdfeagKcHw2rDQAAAAC4TlOQFh4eLtag4uUQ1Jw5c4aeeeaZUu+Hk8WVTBhnMhaQ3uDYQDoAAAAApRy7Oz0+SLv77rvFX1t50LilzZHVBtQSyNVtOobqNftz8B0AAABAqXy8905TCDpy5EgKCpKzMJtxrhBHErTx7If09HSrLbbJKC1VAQAAgEpOKcfZnR7fksYLi9rDMxwcCdLUEsihqxMAAAC0UDTmO/M2WHEAAMDL6PXyiSkgSJ6hGBEir1MZmC3P5My6Jc/Mz7ulvtC0I9TW+FRbCzT/3HmprF7t36QyQz15Lcw7akRbXS4wyjNeTQ6ewIMM8szQGoFXpLLal+W65Saek8qyUzLLdN1TsM1bWsScpfnZ5ebm0u7du+n48ePSdXl5eUhmCwAAAFDeQRpnzOV1p3r27EmtW7cWEwiuXr1afD2PLTNn3gUAAABw+8QBnZObrwVpnCW3VatWdP36dTp16pRIycFrU3GWXQAAAIDypJDe6c3nxqTt3buXtm7dKlaD523Tpk00adIk6tGjB+3YsYNCQ0PdV1MAAACAClxxoLzptY5H8/O7Hdfx6gLLly+nAQMGiK5P7g4FAAAAKA8KUnDc1rx5c9q/f78Yl2bp/fffF38feeSRsq0dAAA4xGCQTzr+BnlGoT4nTyorys0v09mIhZnyGp8Zl+W1n28cSZTKYhT5cevVkxsA6kVaz+4kvTy70+FxR0aVNUnTVNYzPSvPRr1+5KxUlnElq0zXPYXKm4JDUyg5ePBgWrt2rep1HKiNGDHCoRUHAAAAAKAMgzReKWDz5s02r1+2bBmZTMgFAwAAAO6noLsTAAAAwPMomDhQut69e9OFCxfK4q4AAAAAHB6T5uzmcy1p3377rWr5rl276D//+Q/VrVtXXMYEAgAAAHA3xUu6LcslSBs0aJBIu6E2OeC5554Tf/l6o1FeZw0AAMqXak+QST4+K2U8llgplM8RWUnyrNJrhutSWUF2gVQW+sdlqcwQKK9V6gidXu/Q81eb8ZpxJV0qS78ol2WdzXWqbgAlaQpB+/XrR/3796fk5GQxQcC8GQwGOnr0qPgfARoAAACUB8XHuzs1BWnff/893X///dShQwfRvems/Px8ysjIsNpMRvnXEwAAAEBlnd2puZYvvviiGJvG63g+88wzlJOTo/lB4+PjKTIy0mq7lLhG8/0AAABA5aWgJU3Wtm1bsfIAjz/j/7UmsOV8a+np6VZbbJNRzlQFAAAAKinFx1vSnM6TFhwcTCtWrBCtary4Oi+47qjAwECxWdIbApytCgAAAFRCipe0iFVYMltOt4G
UGwAAAABlS3N7X25uLu3evZuOHz8uXZeXl0effvppWdUNAAAAwO6KA85uPheknT59mlq0aEE9e/ak1q1bU69evejq1avF1/PYsnHjxrmjngAAAABWFEXn9OZzQRrP6GzVqhVdv36dTp06ReHh4dStWzdKSkpyXw0BAAAAVCikd3rzuTFpe/fupa1bt4pJArxt2rSJJk2aRD169BCTB0JDQ91XUwAAAIBKNHFAr3U8mp/f7biOU3AsX76cBgwYILo+uTsUAAAAoDwoPp4nTVNLWvPmzUV+NB6XZun9998XfzHLEwAAAKACgrTBgwfT2rVr6fHHH5eu40CN1+7k3GkAAFDxVPOM6w0OLTruCp2/3EoRVi9IKqvRsrpUFtO6kVQWWK+u/CCR0aU+L/UV5lUYi+SytFSpKOLseaksIPSsVGYyygvHZ57SvjoPlM5bWsScpde6UsDmzZttXr9s2TIRqAEAAAC4m4LuTgAAAADPo3hJKo1yaUn7+uuvnVpQHQAAAKCsKT7ekqYpSHv00UepVq1a9PTTT9Mvv/zivloBAAAAlAJBWglTp04VMzy7dOkiEtsuXryYUlPlAZYAAAAAUI5j0p555hmaOXMmHThwgD7++GOaM2cOTZ8+XaTfGD9+PD3wwAMuVAcAAJxhNMqTtgqN8u9wU4A8y9IvOFAqMwQ7P+PTP1w+tUTUiZDKYu5qItel231SWVL1u6WylHzr2Z0FRnl2p8nB1pIggzy7s0bTFKmsdp3fpDJ5fipRbpo8LCj3aoFUVpShMqsUNFG8pEXMWU5/C9u3by9mc/LanR999BGlpKTQgw8+SA0bNizbGgIAAABUwrU7NbWk8QoDJQUFBYm8abwlJibSqlWrSr2f/Px8sVkyGQtIbwjQUh0AAACoxExoSbtNUc2MeFuTJk1o7ty5pd5PfHw8RUZGWm2XEtdoqQoAAABUcgomDtx27tw5qlatmssPyklx09PTrbbYJqNcvl8AAACoPBR0d95Wv379MnnQwMBAsVlCVycAAACAC7M7c3NzxczO6OhoatmypdV1eXl59MUXX9Do0aO13i0AADjIZJKHnhTkFUplGTlyZ0l+TFWpLDAqXCoLipJnfGZRrkP1U5sZGhoTKj9uwwZS2bnqHaWyfUl1pLLEc9bjmnNy5Odfygid23UL9ZfKmjaSJ8F1rSvPIK3XRF7PM/T4BanMEHxLKivKcKx+YJu3dFuWS3fn6dOnqUWLFtSzZ09q3bo19erVS8zuNONuy3HjxrmjngAAAACVqrtTU5D20ksviQS2169fp1OnTlF4eDh169aNkpKS3FdDAAAAgEo4cUBTd+fevXtp69atFBMTI7ZNmzbRpEmTqEePHrRjxw4KDZWbswEAAADcQfGSFrFyaUnj8Wh+fn5WedOWL19OAwYMEF2f3B0KAAAAUB5MLmw+15LWvHlzsW4nj0uz9P7774u/vDQUAAAAAJRzkDZ48GBau3atWF2gJA7UTCYTrVixogyqBQAAtpiKjFJZTqY88/Jmujy98Va9mlJZzeryjM/gKsFSmV9EltPrT/oFqaRZiqgiFaXkRUllZ85az+Rkxw9Yz6DMzZbXy1RUZsEqJrkNJSQiTCozGuWUUw2rxUhl9arIuUMDQuXnqvfz7W65iqKgu9M6Ce3mzZttXs9reXKgBgAAAOBuCiYOAAAAAHgeBS1pAAAAAJ5HKeeWtKVLl1KDBg0oKCiIOnfuTL/++qtD+61bt05Mthw0aJCmx0OQBgAAAF7JpDi/abV+/XqaMmUKzZ49mw4ePEht2rShfv36idyx9pw/f56mTp0q0pWVS3cnR44///wzJScni8s1a9akLl26UKdOnZy5OwAAAIBylZ+fL7bS1hY3W7RoEY0fP754ZSWeKPndd9/RypUrafr06ar7GI1GGjVqFM2ZM4d++uknunVLXh6szII0jhb/9re/0Z49e6hevXpUo0YNUX7t2jV68cUXxeoDX3/9NVWvXl1TJQA
AwHHGQnmdytwsldmdafLtbhZGS2V1qlV3aK1NQ3Cq0+tPqs1IpcICqUiv0gtVVChPSMvPybO6nH0znZylNgs0M936/llOgfyaKAFBUpneT17jE9xDcWECQHx8vAieLHEr2auvvirdtqCgQKxbzhMozfR6PfXp00c0Wtny2muviZjoySefFEGaVpqCNF5dgKPCEydOULNmzayu42WinnjiCXr22Wfpyy+/1FwRAAAAgPKaOBAXFye6Ly3ZakW7ceOGiH/MjVNmfPnkyZOq++zevZs+/vhj+v33352uo6Yg7YcffqBdu3ZJARrjsnfffZfuvfdep5oYTcYC0htU8ugAAAAAqFCcGFvmSNemqzIzM0VO2Y8++kgso1kuQRo/mYyMDLuVcuQJqzUx1m06huo1+7OfFwAAAKA0pnLKd8aBlsFgEMO7LPFlHpdf0h9//CEmDPCymcV1/f95ZHl5Te59bNy4cdnO7hw2bBiNGTOGNm7caBWs8f9cxoPpRowY4VATY3p6utUW22SUlqoAAABAJacoOqc3LQICAqh9+/a0bds2q6CLL/PESbVlNP/3v/+Jrk7zxktn3nfffeL/unXrln1LGs9s4EoNHz6cioqKRKXNA+o4MuSBcW+99ZZTTYzo6gQAAABPNWXKFNFQ1aFDB5HNYvHixZSdnV0823P06NFUp04d0VvIedRatWpltX9U1J9LnpUsL9PuzuXLl9OCBQvELAfLFBwcYUZERGi5OwAAKCP5OfLszsx0ec3L1NwQqcxURZ7dGRipNrvTsc4XY648GzMvXa4fpadJRVEBmVJZlWh5fczgCOvnkZuVLd1Gp5frq1eZPhoYKs/QDArxl8r8DSrLHipYCtFbx6Rpxb2JKSkpNGvWLBH/tG3blhISEoonEyQlJYkZn2VJc540ntm5b98+0bzHzXY8q2HJkiX02Wef0WOPPUa9e/cu0woCAAAAqCnvNTgnT54sNjU7d+60u+/q1avdG6RxxDhw4EAKCwujnJwcMQ6Nm/c46y53g/bt25d+/PFHBGoAAADgdqZybEmrCJra5Tgp27Rp0yg1NZVWrVpFI0eOFNl3t2zZIgbP8XXz5893X20BAAAAynnigFcEaceOHaOxY8eK/4cOHSpSbgwZMqT4el764MiRI2VfSwAAAACVMWnObt5A8wg3XsVd7KjXi9kLkZGRxdeFh4eLdBoAAAAAUI5BWoMGDejMmTPFl3m9Kl7D04xnNtSqVcvFKgEAAAA4lszW2c0baJo4MHHiRLF2lVnJXB/ff/89Jg0AAFQAk8Wx2SwrQ14k/GZWmFRWECGn4Aiu9mdOJ0uBkXI+yxySH8OYK9cl52aO/LhXr0plNfIvSGUN6livl8iu32Gd5T0kPMSxdBvB8nOIiJJTcDRtLKcgqRnyZ9opS7qzKovO58kL24N7KF7SbVkuQdqECRPsXj9v3jxX6wMAAADgEG+ZAFBuedIAAAAAPIEJLWkAAAAAnkfx8SBN8+xORVHo3LlzYu1O87qd69evp08//ZRu3LjhjjoCAAAAVDqaWtJOnTpF/fr1o4sXL1KjRo3E6gKPPvqoWBqKg7eQkBDau3cvNW3a1H01BgAAAKDyXxbKo4O0l156SSwBtWnTJlq5ciU9/PDDdMcdd4hUHLwsFAdsvCoBr+MJAADlp6jgz94NS1np8ozKmxnyvhk15NRJ0THRUllItDwLMiM4y6EF1nNuyHXJOHdFKou5cFgqa9ssVirz72hd57RMedaqn0EqorBguW6RwfJrVztUnslZ/9bvUlnB+fNSWU6qY68JuM6E7s7buJVszpw51Lp1a3rjjTdEC9rUqVPJ39+fAgMDafr06bRr1y731RYAAACgkqw4oKklLSsri6Kj//x1FRoaKjbL5LV169ala9eulXo/+fn5YrNkMhaQ3iDnrwEAAABQ4y3BVrm0pNWuXVusKmC2cOFCql79dhLElJQUqlKlSqn3Ex8fL5aTstwuJa7RWncAAAC
oxEyKzunN54K0Pn36iC5OyxUIeL1OM55IcPfdd5d6P3FxcWKNT8sttskorXUHAAAA8FmaujtXrFhh9/phw4bRmDFjSr0fHr/GmyV0dQIAAIAWio93d2pOZnvixAnat28fdenShZo3by5a1pYsWSLGmD322GNYuxMAwEPW7szJlGdUpt6UZzLeUOS1O6tXqyaVhVSV17P0i7gllRlzC6Sy7Ev5cl3OyDMoQ2seksoa+ss/4mNqt7C6nF9FrltQQaZUFpibJpUZbslTXvXnrjs0kzP1SKJUln5ZftzCTPl1B9cpCNJuS0hIoIEDB1JYWBjl5OTQxo0bafTo0SItB6fg6Nu3r+jyRKAGAAAA7mby8SBN05g0zoE2bdo0Sk1NpVWrVtHIkSNp/PjxtGXLFtq2bZu4bv78+e6rLQAAAIDFAuvObj4XpB07dozGjh0r/h86dChlZmbSkCFDiq8fNWoUHTlypOxrCQAAAFDJ8qRpXrtTp/sz+tTr9RQUFCTSZ5jxTE+eqQkAAAAA5RikNWjQgM6cOVN8mZeDqlevXvFlzqFmmdwWAAAAwJ1j0kxObj43cYDzohktZhC1atXK6vrvv/8ekwYAACqAYpLXhszPyZPKbt2SZ1nezJPXvVRiakplIVVv58U0MwQ79lu/KEOe3XjzD7nnxT/4tFQWk5UtlQXX/J/1ZX9/6TbGnFypLE9lJmeByizYwuw8h9bkzLgiz+TMvig/rlLoJVGBl1F8/GXVFKRNmDDB7vXz5s1ztT4AAAAADkGQBgAAAOCBTAjSAAAAADyP4uNBmubZnQAAAADgYS1pvPQTp97w//8DNP/44w9auXKlmNVZv359evLJJ6lhw4buqisAAABAMZX5MpU3SOvXrx9NnjxZJLDds2cP3X///dSsWTNq0aIFbd68md555x3aunWrWNcTAAAqlrGwUCrLSJNnHqZkyute5kfJ6ZSCq1eRygIj5XU1c0ieGakm97I80/Qa3ZDv76Y8+zI46pLVZb2fQbqNqUhez7QoX55lWpRvdGi2bFGefLuCbPk1VqPzlzPcY8an6xQffwk1BWmHDh0S63SyGTNm0KRJk2jRokXF18+cOVMsDbV79+6yrykAAABAJQrSNI1J4xxp5jxpJ0+epDFjxlhdz0tGHT582KFu04yMDKvNZCzQWncAAACoxEw+nsxWU5DWuXNn2rRpk/i/cePGUkD2+++/U3R0dKn3Ex8fL5aTstwuJa7RWncAAAAAn6Wpu/ONN96g/v37U3Z2No0YMYL+8Y9/iGWieEzaqVOn6N1336W4uLhS74dvM2XKFKuyB4f/or32AAAAUGkpLvV3yuMEvTpI4wkBvPQTB1i//PJnUDV37lzxt3bt2vTqq6/S888/X+r9BAYGis2S3iAPPgUAAACorGPSNCez5UCNF1ZPSUmhs2fPkslkEouq8+LrAADgOYoK5JmMWenyTMlUeQlNulUjViqrVqu6VBZWXZ4ZmhEsr3FpzDU5NLsx57w8M7Tgpvw8AqKtn0dwtPUPfxYUKZcFhMoNAkGRwVKZ2mxRo8rrWZAtj6fOCZVf46zL8qza/GsYi+0qk4+n4NCczPbEiRO0atUqunnzphijVqVKFVqwYAE98cQTtH37dvfUEgAAAEClJc3Zzeda0hISEmjgwIEUFhZGOTk5tHHjRho9erRIy8Etan379qUff/yRevfu7b4aAwAAAJD3zNIsl5a01157TeRBS01NFa1pI0eOpPHjx9OWLVto27Zt4rr58+e7r7YAAAAAlYSmIO3YsWMiFxobOnQoZWZmitUHzEaNGkVHjhwp+1oCAAAAlIDuzhJ0uj+nrPIankFBQSLHmVl4eDilp6uMQAUAAAAoY4rJt1NwaGpJ4xmcnBfNjGd51qtXr/gyL7TOMz0BAAAA3M3k4ysOaGpJmzhxYvGyUKxVq1ZW13MONUwaAADwDCaL47VZTqacHiL5urxI+OUGtaWymvUaSmVVGpyQytLO3ZL
KshLlFBSOMubKz8OYa93GoNPrHEqtEVbjdu+PmX9okFRmCAxw6PUsyJBfT//gNJV95aigKKPIoVQlYJu3dFuWS5A2YcIEu9fPmzfP1foAAAAAOMTkLU1i5ZUnDQAAAAA8cOIAAAAAgCdQfLshzbkgjVcW2L17N129elXM8mzUqBE98sgj1LRp07KvIQAAAIAKBGkWrl+/TgMGDKD9+/eL4IxXGWjXrh1t2LCBXnrpJbHw+sKFC91XWwAAAID/z+TjUZqmIO3vf/871a5dm9LS0igwMJCmTp1KGRkZImjj1jVOcFunTh16/vnn3VdjAABwiKKy+nRuRrZUlnw5Qyq7cLOqVNa8TgupLPKOY3JZ4nWpLPtCnkMLrKtRu52j+5akM8hDsf1C5VmgfiFymRq9QV6IvTBbfq6B4fIsUL8I+RRszMWi61ooPj4ZVtPEAU6x8cYbb1BERIQI0ngJqLVr14pAjVNvLF68mJYvX+6+2gIAAAD8f4qiOL35XJDGgZl5xQGxs14v8qYVFf2Z66Vr1650/vz5Uu8nPz9fBHaWm8mIXw8AAADguZYuXSoS+/OKS507d6Zff/3V5m0/+ugj6tGjB1WpUkVsffr0sXt7l4O07t2706xZsyg7O5sKCwvp5ZdfFpMGoqOjxfUpKSmiIqWJj48Xy0lZbpcS12iqOAAAAFRuJpPzm1br168XY+9nz55NBw8epDZt2lC/fv3EeH01O3fupBEjRtCOHTvECk1169alvn370uXLl90TpL311lv0+++/U1RUFIWGhtLq1autujdPnDhRvAC7PXFxcWKNT8sttskoLVUBAACASk4px+7ORYsW0fjx42ncuHHUsmVLWrFiBYWEhNDKlStVb79mzRqaNGkStW3blpo3b07//Oc/xYTLbdu2uWfiALeaHTlyhPbs2SO6LO+55x6KiYkpvt6RAM3cbcqbJb1BXoIDAAAAwBZXFhzgOIa30uITVlBQQAcOHBCNTJZDvrgLk1vJHJGTkyN6Ic29j27Jk3bhwgW6dOkSdenSRQRoJ0+epCVLlogn+thjj2HtTgAAD1aQJ888vHlNXmvzj4tRUtm5avLszlZN75DKqjQ4K5WlX8x063qe+ZnyuOacm/KMSr2f3IGkGOW+r4DIAodmchrzCxyaVatG7yevN6rz15XZTNbKQHEhSuOhV3PmzLEq467MV199VbrtjRs3xBj8GjVqWJXzZY6DHMGpyjhDBgd2bgnSEhISaODAgRQWFiYiwo0bN9Lo0aNFvyw34XFf648//ohADQAAANxOcSF+5VYxHmNmSa0VrSxwNox169aJcWo86cAtY9Jee+01mjZtGqWmptKqVato5MiRon92y5Ytoo+Vr+OKAAAAAHiywMBAkVLMcrMVpHHPocFgoGvXrlmV8+WaNWuWOp6fYyNuxLrrrrs01VFTkHbs2LHicWecuDYzM5OGDBlSfP2oUaPEmDUAAAAAdzOZFKc3LQICAqh9+/ZWg/7NkwB4+JctvArT66+/LnoiO3TooPn5aR6TZs6TxgPmuMmO02eYhYeHi5maAAAAAO6mlGNSWu4aHTNmjAi2OnXqJBL4c0oynu3JePgXr7rEY93YggULRNqyzz//XORWS05OFuU8ZIy3Mg/S+EHOnDlDjRs3Fpd5RkO9evWKr09KSqJatWppuUsAAAAAj18WatiwYSIfLAdeHHBxag1uITNPJuAYiBuwzDhFGc8KtexxtDc5weUgbeLEiWJ2g1mrVq2kZaMwaQAAwHOZiqxnRbLMVLkH5NIFeT3Psw3kGZ+N6lmfB1iV5vJst7TzqWW6nqcx1/rsnJOkcl9G+b4KcwqlspxUeT3ToEh57U5DgJ9DMznzM/NVygpKfQ7g+QusT548WWxqeFKAJUdWYCrTIG3ChAl2r583b56r9QEAAABwiLesweksTRMHAAAAAKB8aJ44AAAAAOAJTK4sOeCrLWk87dRWOQ+cAwAAAHA3RXF+87kgLSMjQ+R
H48XVeTYDz3CwnEjAsx4aNmzojnoCAAAASMtCObv5XHfnzJkz6fDhw/TZZ5/RrVu36I033qCDBw/Shg0bRKK3yjCIDwDA1+TnyGtopl6V1/M8c+F2XkyzRu3ulMpatZRnd8acvSyVZV2TZ1WmH5fLHKE2U1Jt9mjuZXnmpV+EfCr0D5fXGvUP9ZfK9AZ5rU1jgTyDNj+tsNT1RxnW6fTs2Z0e3ZL2zTff0AcffCByfjz11FO0f/9+0Xo2YMCA4pXkzcluAQAAANzJ11vSNAVpHJDVr1/fai2rrVu3iuWhHnroIbHouiM4oOOuU8vNZJRzyAAAAABUVpqCNF5d4MSJE1ZlvBQULxqam5tLgwcPduh+eMkEXk7KcruUuEZbzQEAAKBSU9CSdlvfvn1p1apVUjmvQfXDDz+ItTwdERcXJ9b4tNxim4zSUhUAAACo5EyK85vPTRyYM2cOXblyRSrnyQLcorZlyxYxkaA0gYGBYrOkN/w58QAAAADAEd7SIlYuQVqVKlXEVhIHXDzrs0WLFtSrV6+yrB8AAFTAep7pN9KksqTz8tqdJ2pHS2WxDdpLZdEd5NmduWnyTM7CHPl2OeflWZqOUJspaVQry5XHROdfk+/PECzPDDUEG6QyU5H8GEqhyaHbgTaKj8/u1BSkTZkyRbWcc6XNnz+fqlatKi4vWrSobGoHAAAAUElXHNAUpC1evJjatGlDUVFRUiTLEwo4yS1ScAAAAACUc5A2b948+vDDD+ntt9+m3r17F5f7+/vT6tWrqWXLlmVQJQAAAIDS+Xp3p6bZndOnT6f169fTxIkTaerUqVRYKGdQBgAAACgPClJwWOvYsSMdOHBAJLbt0KEDHT16FF2cAAAAUO4UHw/SNHV3WuZF++STT2jdunXUp08fq0XWAQAAAMqDyce7O50K0syGDx9O3bt3Fy1rlstFAQCAd8vLkpf5u5Z0Qyo7US1UKqsWIS+6fk8bOaVHrexsh9KBXKHkMkvL4Qq1RdzVyqD8KF7SIlYhQRqLjY0VGwAAAAB4UJAGAAAAUBEUdHfad+7cOUpMTKRatWpRq1atyqZWAAAAAJU8ma2m2Z2TJk2irKws8X9ubi4NGTKEmjRpQv369RNJbjl3mvl6AAAAAHdSfHx2p6Yg7YMPPqCcnD8Hk77++uv0yy+/0NatW0VgtmvXLkpKSqK5c+e6q64AAAAAVt2dzm4+191p+aQ2bdpECxcupPvuu09c7tatm1izc9q0aRQfH1/2NQUAgHKjmEwOLbp+7lSQVBYWVkcqC251j1R2d2d5YXN5TyKdXs7FmRxovQJ65il5NipUzs+pL9GczNacuDY5OZnuuusuq+u4y/PixYtlVzsAAACASkrzxIGZM2dSSEgI6fV6unLlCt155+18OKmpqWKR9dLk5+eLzZLJWEB6Q4DW6gAAAEAlZfKSsWXl0pLWs2dPOnXqFB06dEgspn7hwgWr6zdv3mwVtNnC3aGRkZFW26XENdprDwAAAJWWgjFpt+3cuVO1nJ8sd4OOHDmSxo4dW+r9xMXF0ZQpU6zKHhz+i5aqAAAAQCWn+HhLWpkksw0MDKTDhw9TixYtHL49b5bQ1QkAAABaKAjSbivZ+mXGC6zPnz+fqlatKi7zLE8AAPAthbnyepkpF69LZcf95VOLwVBTvsMWPaWidl0MUlkdP3+pLCD0qNXl5NCr0m3SE+W1QYsyiuR6gNcyKb49u1NTkLZ48WIxgzMqKkrq7jxx4oSYNGCe/QkAAAAA5RSkzZs3jz788EN6++23xeoCZv7+/rR69WoxmQAAAACgPCg+3t2paXbn9OnTaf369TRx4kSaOnUqFRYWuq9mAAAAAHZgWagSOnbsSAcOHKCUlBTq0KEDHT16FF2cAAAAUO4UpOCQhYWF0SeffELr1q2jPn36iIkDAAAAAOXJ5OPLQrmUgmP48OHUvXt30bJWv379sqsVAAB4hbwseQZl8tkrUpleZf1NohpSidK8u1T
W5h45RVONkGCry35Bh6Xb+AVdksrSTmZKZQU3MXTHWyle0m1ZYXnSYmNjxQYAAAAAHpbMFgAAAKC8KciTBgAAAOB5FHR3WuPln3gM2r333kuNGjWiY8eO0dKlS8XgvcGDB1O/fv3cU1MAAAAACwjSLGzYsIGGDh0qVhzIz8+njRs30qOPPipScRgMBnr44Yfp008/FQutAwAAALiTCd2dt82dO5fmzJlDM2bMEOk3OEDj9TxnzpwprueVCN58800EaQAAlZjajM+rajM+DXKqToO+mny7Zh2ksrtaWad+qpKTK92mIEsuy88okMqMuXIaKWOub5/8fYXi4y1pmpLZnjp1ikaNGiX+HzZsGGVnZ9OgQYOKr+fuzsTExLKvJQAAAEAlo6klLTw8nFJTU6lBgwZ069YtKioqEpfN+H9OdFsa7irlzZLJWEB6g5wLBwAAAECN4uPJbDW1pPHqAs8++yytWbOGxowZQ3379qW4uDg6efKkaGWbNm2aSG5bmvj4eIqMjLTaLiWuceV5AAAAQCWjYO3O29566y2KiIigCRMmUEFBgVhsnScNtGzZUmxXrlyh+fPnl3o/HNilp6dbbbFN/uxGBQAAAHA0T5qzm891d9aoUYN+/PFHq7L33nuPXnzxRcrJyaHmzZuTn1/pdxkYGCg2S+jqBAAAAC1MXtIiVqHJbDk44/xpjgRoAABQ+eRmZEllV89dlcr8/A1SWXBwjFQW3uQuq8vNml+XbhNxOVkqu3XxllSWc8V6jDTD7E7wBJqiKk63ocZoNIpuzqpVq4rLixYtKpvaAQAAAFTSiQOagrTFixdTmzZtRDJbS4qi0IkTJyg0NJR0Ol1Z1xEAAABA4i0TAMpl4sC8efPEIH9OXrtjx47ijVcbWL16tfh/+/bt7qstAAAAQAVNHOBlMDkNWVBQEHXu3Jl+/fVXu7f/8ssvxZAwvn3r1q1p8+bN7gvSpk+fLmZ0Tpw4kaZOnUqFhYWaHgwAAADAG1NwrF+/Xgz7mj17Nh08eFD0LPJ65devy+Mh2d69e2nEiBH05JNP0qFDh0Tyf96OHj3qniCNdezYUSywnpKSItJv8IOhixMAAAAqYkya4uSmFY+3Hz9+PI0bN06kHVuxYgWFhITQypUrVW+/ZMkSevDBB0UO2RYtWtDrr79Od999N73//vvuC9IYryrwySefiHxnnOCWJw4AAAAAeIv8/HzKyMiw2kquhmTGuWG5gYpjHjO9Xi8u//zzz6r7cLnl7Rm3vNm6vSrFRRcvXlS++eYbJSsry9W7UvLy8pTZs2eLv9jXPft6W32xb/ns6231xb6e/ZjYt3z29bb6eprZs2dzn6fVxmVqLl++LK7fu3evVfm0adOUTp06qe7j7++vfP7551ZlS5cuVapXr+5wHV0O0spSenq6eBH4L/Z1z77eVl/sWz77elt9sa9nPyb2LZ99va2+niYvL088B8vNVuBZUUEass8CAABApROosvqRLTExMSKTxbVr16zK+XLNmjVV9+FyLbcvszFpAAAAAJVFQEAAtW/fnrZt21ZcZjKZxOUuXbqo7sPllrdnW7ZssXl7NWhJAwAAACgFp98YM2aMyGzRqVMnkeA/OztbzPZko0ePpjp16lB8fLy4/Pzzz1OvXr3o7bffpocffpjWrVtH+/fvpw8//JC8MkjjZkfOP+Jo8yP21b6vt9UX+5bPvt5WX+zr2Y+JfctnX2+rr7cbNmyYSD82a9YsSk5OprZt21JCQgLVqFFDXJ+UlCRmfJp17dqVPv/8c3rllVfo5ZdfpqZNm9I333xDrVq1cvgxdTwwzS3PBgAAAACchjFpAAAAAB4IQRoAAACAB0KQBgAAAOCBEKQBAAAAeCAEaQAAAAAeqEJTcNy4cUOsHs+LjfJ0VsaZeHna6tixY6latWrkaa5evUrLly+n3bt3i/95um2jRo1o0KBBos6ckRgAAADAVRWWguO3334Tq8GHhISIVeLNeUZ4yQTO0Ju
Tk0M//PCDSBqn5sSJE7Rv3z6Rubd58+Z08uRJWrJkiVjB/rHHHqPevXur7nfw4EGqUqUKNWzYUFz+7LPPaMWKFSK/Sf369Wny5Mk0fPhw1X05CR3XtUmTJhQcHCyCy5EjR1JBQYGoa8uWLUXOlPDwcPJ1aWlptGnTJpG8z1H8nqxatUq8zrZ8/fXX1L9/f/G5gNJxxmvLvDyW5ZcuXaJ69eo5dD/nzp2jxMREqlWrVqk5fA4fPkwHDhyge++9V/xAOXbsGC1dulQ85uDBg8X32pfw95tzG6n9mBw4cKDIRG5LamoqHTlyhNq0aUPR0dHih+nHH38sjlOPPvootWjRQnU/fu+CgoLEUjTsp59+sjpOPfvsszazlufm5tLatWtVf0jef//9VJnw+eSDDz4Qea0cxa8VH885p5UtnJx0yJAhdo9lvvJ5YvhMVSClgnTu3Fl5+umnFZPJJF3HZXzdPffco7rv999/rwQEBCjR0dFKUFCQuFytWjWlT58+Su/evRWDwaBs27ZNdd+77rpL2bJli/j/o48+UoKDg5W///3vyvLly5UXXnhBCQsLUz7++GPVfbt166a8+uqrxZc/++wz8TzYzZs3lbZt24r7sufixYtKZmamVF5QUKD897//VRzBr8/27duVDz/8UNm0aZPY1x6+zcyZM5Xdu3eLy/za9O/fX+nXr5/ywQcfKM74/fffFb1er3rdv//9b9WN35f333+/+LIanU6nREREKOPHj1f27dvnVN1++eUXZfHixcr06dPFxv9zmSOv69mzZ5XCwkJxOT8/X1m3bp3yySefKCkpKTb34wV5Ld+DxMRE5eWXX1Yee+wxZcaMGeI+S8PvyZw5c5QJEyYokyZNUt566y3l9OnTNm/PCwE/+uij4vPPi/Xy+1tUVFR8fXJyss33Z+LEicWfwZycHOVvf/ubuC2/9vz3vvvuU/2Msq+//lq8j1WrVhXfFf4uRUVFie8ef574ujVr1ijO4O8Qv9ZacF3Pnz9v9zZfffWVkp2d7VSdzpw5ozRq1Ei8zr169VKGDh0qNv6fy5o0aSJuo4Y/c5GRkeJ1rVKlirJ//36lYcOGStOmTZXGjRuLY8+BAwdU9+UFm/l7y7755hvxvjzyyCPKSy+9pAwePFgs3Gy+vmR969evLz4TdevWFY/98MMPi+MUvzf8mTF/vm0xGo02yy9cuKA4ij/3P/74o/K///3PoeMJH3f/+OMPcfno0aPic/rMM88oCQkJDj+m2v3a+h4sWbJEdePXKS4urviyGn5d+Xb8uedjBB8rfPHzVFafKXBehQVp/IE8ceKEzev5Or6Nmi5duoiTH1u7dq34wPJJ0YxPzA888IDqvvxBNh/U27VrJwIdS3yCadmypc19zQcR80GLP9x8QmR8QKpdu7bqvleuXFE6duwovhz8wX788cetToT2TqocUN26dUv8n5qaKr4c/EXhwJT3ad68uXL9+nXVfVesWKH4+fkp7du3F8EPB5bh4eHKU089JQ6A/Jw4iFELAuxtP/30k836mk/2/NfWZm/f1157Tbw3/P+dd96pvPPOO8qNGzeU0ly7dk3p3r272I8PKnxg4o3/5zK+jm+j5uTJk+J2XC8+UPIJhl+z0NBQJSQkRImJibEZNPEB9ssvvxT/cyAcGBgofgwMGzZMPA/ef+/evTbrzHXkx+X3if/y49asWVN8TqZNm6a6H/8YuOOOO8Tj8o8NrjsfOM0nC/488XNWw49hfh34ZBQbGyuCfg5kuP58wOfvkJq7775beeONN4q/exyg8ftlxsEl/1jxlcCfT8IDBw4Un/mSuIyv69u3r819+XuWkZGhvPnmm+J15stm48aNUwYNGqS6L3/uzME9f9/nz59vdf17770nPltqxwr+Xpt//PJ+XMb489ugQQNl9uzZPhP4Hz582O62fv16u8cafk/4NbHcuLxOnTrifw6CbO27atUq8f7zOYDr/vzzz5cakHrb58nVzxR4cZD
Gb6y9X818HZ941PAB1/xrgwMlPrkdPHiw+Hr+otSoUUN1X/4y8S8QxgciPjFY4lYQDlzUcH3MrVHmwIu/rHxQYufOnbMZWI4ePVp8OX777TdxEOITcYcOHUTrQWknVS43n1T5YMhBpPkLxy1zfF/cAqOGb2sORPlEzPVbunRp8fV8oGnRooXqY/LBzdZmL9B68MEHRcBQMiDi9+nYsWOq+6g9V36f+PnyAZsDHz6BcCBsC58YOIDngKskLuvatasyZMgQ1X354Mi/LI8cOSJaVPk14TJuIeOWsgEDBoiWMVufR3MAxwHbiy++aHX9K6+8Ilph1XAgxwdWPkDz40yePFl8Vsyta/x5VQui69Wrp+zYsaP4Mrf0cbDHB3i+H3snVMvXuFWrVsrnn39udT0HOxwA2jrY8+ec8UGbT1D8mpnxjxg+0arxxsCfjwX2Trz83G0dL/jH4/Hjx8X//Dni+lm26HKrBwcDarjFhIMM83HK/L/lcYqD/5K4zPLHBAft/B6Znyu3ovCx11cCf3ufi9KOUxx48P2a3yNnj1P8d8GCBeLHMj8W/xjnYy4HU97+eXL1MwVeHKTxr18+8fKBgU8K/AuXN/6fy/iDahlMlDwp8ofKjE8Kli1c3FJmK1jiE+2TTz4p/ueTPp9ALc2bN09p3bq16r78S4lPaty9ygcg/nV47733Fl/PzfJ8MFLDLWyWXyjziZ8PEtw65uhJtVmzZlKLwdatW23+4uPX0bKLgr9clgcJPuGqfTn5NeYDz86dO1U3Pojbqi9btGiRaBq3bELXevAzy83NVT799FPxWvNj2jog8OfAMlgviYM+WwEEt0oeOnRI/J+VlSXqwUGD2Z49e0RgZCtwMbcK848DtcDf1uPy68xdO2b82PwemX9pc8snv+dq72vJblQ+KXCQyl3+fJ29z5O55ZVbCC0f3/z9sXWi4BY+848c/oHB92UZLP7666/iNrYe19sC/1q1atnsBmLffvutuE1pAa3acYq/l7aOU/yDwRzUcGtSyW43/v5xN5faccayyystLU08f3PAwJ8Lft5qvDHw5x8x3E3Kn1m17bvvvrN7nNqwYYM4TnFLkqvHKbZr1y5lzJgx4jnx5u2fJ1c/U+DFQRrjvnxuXeIvhfnXD//PZdxMbQt3JXGgZMZBh2WfOH9RbAUtly9fFif5nj17KlOmTBEnI+4G464QLuOxbvzFVsPN9Tx+wFxfbpmxPFH+8MMPyhdffGHzC1ayu4zrzK0o/Hz4oOTISZV/BamdVG19SfgXLb8e5ufO92X5/Djg4tuUxAERB2m2cCBi61e1GQc93JLH4wv5F7UjBz/LX+RquAXVsmu75AGbn48tfALi2zgSzPIB0PKHQFJSks3XmIOihQsXiv/5M1GyhZjHRNkK8Dg4tHxNuFWWXwMO3BkfhNUelwM3tc8pf0Y5UGvTpo3dzxO3InCLH3+eSgYpfEDm4M3Wjxz+fv7rX/8SPzL4gM9jRzlI5dZKbkm01VrpjYE/d/lxCwY/Nrc+cKDCG//PZTwu1lZXD7esWI6N/c9//lPc6s74R6nad49xiwl/VrlV9fXXXxefR37t586dK8r4M8Gt4CVxgMDvAb8ffGwyd7mb8evMr6Eabwz8OYDk18eV49SlS5fE8+MfAlevXi2T4xT/yCo5lMYbP0+ufqbAy4M0M2665a5D3kobBM94kD9/QG3h5nZza5ka/iXAAyY5gOBfHhyYcdP+yJEjRXdkafgAb2t8hS3cOscn65LMgRqfxO0dAB966CExwJO/4CV/ifGX01b37rPPPit+IXF3Av8q5i8cf9k5yOWWP67XE088Ie3HBxhbg2YZH1gsJ1HYwgcRDgi4Djy2xNlfqI7gAff8PvKvY8sxH/w/l/GJmLsT1XALqGXL2bJly6y6KzhwsXWi4PFm3J3AB1f+Rc4nKW6h5XE0s2bNEq02tgJefk+5m5Zb0Pizz12tPCbO8r1Ve9znnnvOZjDE9eZAytb
niQ+4HJyYNw6OLPFBnG9j633n8Z58kOcAjcdK8mtqbgXj99kyuPX2wN88BodbNyxbAvl/LrP3fPj7wd13tvBj/vWvf7V5Pb+Ow4cPF2NIzT9iuZWJfwhs3LhRdR9+nhw0m+vK3wfL1mXuynz33XdV9/XGwJ+/19zabAsHfatXr1ZKwy143ItiHgvqzuNURX6eOMDS8nkq+Zkyj/e1bFmz95kCHwnSKoP/+7//szkglAM1bo62dYIaO3as1VaylZEHl/OBTQ2f/LmVkLsg+MTG4wl40CkHpvx4fOJ09mCjBXd3cABS2mPxL261Gb+O4C4ZHpvHz40PfByA88b/cxl3c/Ft1PAJpmSwYik+Pl4EyrZwoGZ5IDNvPEZEbUyZGbeUcYDIwQYfMDmgM88+ZvzrVm0sD598SrZWMPNrx4GavVZFNeZ9uU481lEL3qdki7YvBP6WuBWB32feHJmxWxoOMm19Hku+L/zaOPojlnGrfWnvR0kcbHtb4F/WuEWPv6/mscLu5E2fJ8vPlOVkEnC/CsuTVtkUFRWJ3G8RERE2r798+bJTeXeys7NFEl3Og+OovLw8Kiws9MmcbhkZGSKPl2UOovbt29t87R3NI8avL+cRsyclJYXOnj0rcobxbRs0aFDqffPnYs+ePSLX0T333FOcy8gZnGOJ85jZypfkifs6i/P0bd++neLi4qh69eo2b3fhwgWRL06n05Vb3bw19+GVK1fozjvvVL0+MzNT5Jns1auX5vvm7wR/RmJjYzXtw98NzoPp51ehedc9JiG6N+4LrsGyUOWEDzL2ggT+4M+ZM8ep+7558yZNmjRJ0z4ccHCAdvHiRXriiSdsJjDkL+Xx48dVg7xPP/3U5v1X1L6c5JgT4nKANGLECGrXrh198cUX9MILL4gTuj28Lyfb5cTIjP9OnDhRvD4cpNkL0Mz78nvRuXNnkTB5wYIFYt/SHpeDCE42yUmSOUCzfFxb+06ZMkV1MxqNNH/+/OLLnrSv2o8Lfs1mzJhB77//vkjU6SjelxN5ctJjfn/t7cs/fCwDNC2Py0EJv/dmnPy6W7duVLduXerevTutW7fOo/Z15TE54St/fm3h44WtAO25554TCVJt4RO6rQDN1r68DydWLi1A4/eQk2qbnxs/Z04szsHdyy+/LH4Ae8q+nBCdf8hs3rxZ/Eg+c+aM+AEZGhpKU6dOpZ49e4pgWI037gtloBxa68DFHFEVse+pU6eK84vx9TypgpvHzezN9KqofV1Jcuxt+/LrwzODLbuYeONyTgHA//PsYzUVtS+nNTFPiOCJGDxGkMfy8X78/Hksk61un5L78mfE2X21PK4rya8rYl9XHtOye5HHTfEgekdV1L7cjcpjrHhcJ48n4/15gDyPweUxZvxd4nGhnrKvKwnRvXFfcB2CtHJiKxmneeO8TVoTebpzX57MwCkPeBo+D6zm/3nGrHkGpL1gqaL2dSXJsbfty2Pk+HUpGcA5MpC+ova1HBs2atQoMWDZnKSZB6dzYDpixAiP2tfV5Nflva8rj8mvE6fz4VRDPMifx0jyWFmeqGRrFYKK3pfHc3IyXPMPTv5RwxMQLCcWWE7Eqeh9XUmI7o37gusQpJUTV5Nxlve+3LpgmauIB5vyoHyehcpfWHvBUkXt60qSY2/cl1MTcO6pf/zjH8UDgB0JlipqX8tgiZfGKTkDkHPR2ZrKX1H7upL8uiL2deUxLV8nfl95gpI54z+fhPnHg60liypqX7U8kJYTajhgtZWktSL2dSUhujfuC67DmLRywuOZNmzYIAaUq208lsST9uVxYZZjQXhMDw8cHTBggBiXcvr0aZuPWVH7mm/PeGArj7uLjIy0GlOTnp7uM/t27NhRTJDgyQodOnSgo0ePOjw4vqL2Nd+OxxaWHONXp04dcZ+etG///v3F54/x5++rr76yup7Hw/FYQk/Z15XHtOTv709Dhw6lhIQEMYB//PjxtGbNGmrWrJlH7cuTgsxjV3m
sFI+NtBzLeuzYMZuTSipiXx5oP2HCBPH8duzYQaNGjRLvU3BwsLj+1KlT4vOoxhv3hTJQBoEeOIDz/3AiQ2dyRFXEvjxeh5N92sq9xqkibLVoVdS+riQ59sZ9LXFXKbe48WvjSGtYRezLnzPOy8fdcDw2qmTewP/+9782l7WpqH1dSX5dEfu68pilpSrhVm1bqzNU1L6cj5DHf/Ealvw94eEB3OrOY/B43WJuIS25TFtF7utKQnRv3BdchyCtnPDJ1vJkrJbPzFZeq4rYlwe/mhfRVcM5x2wFhhW1rytJjr1x35I4txmvo8fvqVblsS8PPrbcOJmypalTp4rkrZ60r6vJrytiX2f34+DOkfVMPWlfHiLAWfP/8pe/iGMHB3T8w4GDJO765byStj6XFbWvswnRvXlfcB7ypAEAAAB4IIxJAwAAAPBACNIAAAAAPBCCNAAAAAAPhCANAAAAwAMhSAMAAADwQAjSAAAAADwQgjQAAAAA8jz/Dyly+8ecEBSPAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig = problem.render(design)\n", + "plt.title(f\"Design (sample {sample_idx})\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 8 — Break constraints deliberately\n", + "\n", + "### The EngiBench constraint API\n", + "\n", + "Every EngiBench problem declares **design constraints** — rules a design must satisfy\n", + "to be physically valid. The `@constraint` decorator wraps a function that `assert`s\n", + "what must be true; `check_constraints()` catches failures and returns a `Violations`\n", + "object.\n", + "\n", + "Each constraint is tagged with a **category** that tells you *why* it exists:\n", + "\n", + "| Category | Import | Meaning |\n", + "|---|---|---|\n", + "| **`THEORY`** | `from engibench.constraint import THEORY` | The constraint comes from **physics**. Values outside the domain are unphysical (e.g. negative volume fraction) but may not crash the solver. |\n", + "| **`IMPL`** | `from engibench.constraint import IMPL` | The constraint guards the **implementation**. Violating it causes runtime errors or undefined behavior in the solver (e.g. mesh resolution too small). 
|\n", + "\n", + "Constraints also have a **criticality** level:\n", + "- `Criticality.Error` — hard violation, design is infeasible\n", + "- `Criticality.Warning` — soft violation, solver may still run but results are suspect\n", + "\n", + "The `Violations` object returned by `check_constraints()` supports filtering:\n", + "```python\n", + "violations.by_category(THEORY) # only physics violations\n", + "violations.by_category(IMPL) # only implementation violations\n", + "violations.by_criticality(Criticality.Warning) # only warnings\n", + "```\n", + "\n", + "For example, in `beams2d` the volume fraction has a `THEORY` constraint (physically,\n", + "volfrac must be in [0, 1]) **and** a stricter `IMPL` warning (the solver works best\n", + "with volfrac in [0.1, 0.9]).\n", + "\n", + "---\n", + "\n", + "### Exercise: force a constraint violation\n", + "\n", + "A design is only valid if it **satisfies all constraints for its operating conditions**. Let's see what happens when we lie about the conditions.\n", + "\n", + "**Your task:** copy the valid `config`, change one **scalar** condition to an extreme value, and call `problem.check_constraints(design=design, config=bad_config)`.\n", + "\n", + "The function returns a `Violations` object — `len(violations) == 0` means no violations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-C\n", + "# Goal: force a constraint violation and inspect the result.\n", + "\n", + "# Find a scalar condition to perturb\n", + "scalar_keys = [k for k in problem.conditions_keys if np.asarray(config[k]).ndim == 0]\n", + "perturb_key = scalar_keys[0]\n", + "original_val = float(config[perturb_key])\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "bad_config = dict(config)\n", + "bad_config[perturb_key] = original_val * 10 # push it far outside the valid range\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"Perturbing '{perturb_key}': {original_val} → {bad_config[perturb_key]}\")\n", + "violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "print(f\"\\n--- All constraint checks for {PROBLEM_ID} ({violations.n_constraints} total) ---\")\n", + "print(f\"Violations triggered: {len(violations)}\\n\")\n", + "if violations:\n", + " print(violations)\n", + "else:\n", + " print(\"No violations. 
Try a more extreme value.\")\n", + "\n", + "# CHECKPOINT\n", + "assert hasattr(violations, \"__len__\"), \"violations should be a Violations object\"\n", + "print(\"\\n✅ Checkpoint passed — constraint checking explored.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 9 — Simulate and optimise\n", + "\n", + "Beyond constraint checking, EngiBench problems expose **`simulate()`** and **`optimize()`** methods — the same solvers used to generate the dataset.\n", + "\n", + "| Method | What it does | Returns |\n", + "|---|---|---|\n", + "| `problem.simulate(design, config)` | Evaluate objective(s) for a given design | `np.ndarray` of objective values |\n", + "| `problem.optimize(starting_point, config)` | Run the full optimiser from a starting design | `(optimised_design, optimisation_history)` |\n", + "\n", + "> **Colab note:** some problems (e.g. `heatconduction2d`, `heatconduction3d`, `airfoil`) require a **Docker container** for their solver and will not run on Colab. Problems like `beams2d` and `thermoelastic2d` use pure-Python solvers and work everywhere." + ] + }, + { + "cell_type": "code", + "source": [ + "# Simulate: evaluate the objective for an existing design\n", + "obj_values = problem.simulate(design, config)\n", + "print(f\"Objective values for sample {sample_idx}: {obj_values}\")\n", + "print(f\"Objectives defined: {problem.objectives}\")" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Optimize: run the solver from a uniform starting point\n", + "starting_point = np.full(problem.design_space.shape, float(config[\"volfrac\"]))\n", + "optimised_design, history = problem.optimize(starting_point, config)\n", + "\n", + "print(f\"Optimisation ran for {len(history)} steps\")\n", + "print(f\"Final objective: {history[-1].obj_values}\")\n", + "\n", + "# Compare: generated design vs. 
dataset design\n", + "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n", + "axes[0].imshow(design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[0].set_title(f\"Dataset design (obj={obj_values[0]:.4f})\")\n", + "axes[0].axis(\"off\")\n", + "axes[1].imshow(optimised_design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[1].set_title(f\"Re-optimised (obj={history[-1].obj_values[0]:.4f})\")\n", + "axes[1].axis(\"off\")\n", + "plt.tight_layout()\n", + "plt.show()" + ], + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "---\n", + "## Reflection\n", + "\n", + "Before moving on, think about:\n", + "\n", + "1. **The API contract** — what is *fixed* by the benchmark (design space, conditions, objectives) vs. what is *yours* to choose (model, hyperparameters, training strategy)?\n", + "2. **Constraints as a test** — why is it important that `check_constraints` exists as a separate function, rather than just training on feasible data?\n", + "3. **Simulate vs. optimise** — `simulate` is cheap (one forward pass), `optimize` is expensive (iterative solver). How might you use each when evaluating a generative model?\n", + "4. **What surprised you?** — anything about the design shapes, condition ranges, or dataset size that you did not expect?" + ], + "metadata": {} + }, + { + "cell_type": "markdown", + "source": [ + "---\n", + "## Next\n", + "\n", + "Proceed to **Notebook 01** where you will train a generative model on this exact benchmark and produce new designs. The API you just learned carries over unchanged." 
+ ], + "metadata": {} + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/solutions/01_train_generate.ipynb b/workshops/dcc26/solutions/01_train_generate.ipynb new file mode 100644 index 0000000..992f2a2 --- /dev/null +++ b/workshops/dcc26/solutions/01_train_generate.ipynb @@ -0,0 +1,1436 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 01: Train a Generative Model for Inverse Design\n\n**Can we learn to skip the optimizer?**\n\nIn Notebook 00 you saw that EngiBench bundles an optimizer with every problem.\nRunning that optimizer produces an optimal design — but it takes time. For\nBeams2D it runs in seconds, but for complex 3D problems it can take minutes or\nhours *per design*.\n\nGenerative AI offers a different approach: **train a neural network once on a\ndataset of optimal designs, then generate new designs instantly.** The trade-off\nis quality for speed — and the central question of this workshop is *how do we\nmeasure that trade-off rigorously?*\n\n### What you will do\n\n| Step | What happens | Key concept |\n|------|-------------|-------------|\n| **Prepare data** | Extract conditions and designs from EngiBench | The standardised data API |\n| **Train a model** | Fit a neural network to map conditions → designs | Supervised learning on design data |\n| **Generate designs** | Produce new designs for unseen conditions | Instant inference vs. slow optimization |\n| **Inspect results** | Compare generated vs. ground-truth designs visually | Setting up evaluation (Notebook 02) |\n\n> **Heads up:** We deliberately train a simple model with limited data and few\n> epochs. 
The results will be imperfect — **that is the point.** Understanding\n> *why* they are imperfect motivates the rigorous benchmarking we explore in\n> Notebook 02 and the discussion session.\n\n---\n\n### Exercise legend\n| Marker | Meaning |\n|---|---|\n| `FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ], + "id": "cell-0" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Colab users:** click **File > Save a copy in Drive** before editing so your changes persist." + ], + "id": "cell-1" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependencies" + ], + "id": "cell-2" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab / local dependency bootstrap\nimport subprocess, sys\n\nIN_COLAB = \"google.colab\" in sys.modules\nFORCE_INSTALL = False # Set True to force install outside Colab\n\nif IN_COLAB or FORCE_INSTALL:\n def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n _pip([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n try:\n import torch\n except Exception:\n _pip([\"torch\", \"torchvision\"])\n print(\"Install complete.\")\nelse:\n print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")" + ], + "id": "cell-3" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## The inverse design problem\n\nTraditional topology optimization works like this:\n\n```\nConditions (volfrac, loads, …) ──► [ Optimizer (iterative) ] ──► Optimal design\n ⏱ seconds to hours\n```\n\nA **learned generator** replaces the optimizer with a neural network:\n\n```\nConditions ─┐\n ├──► [ Neural network ] ──► Approximate design\nRandom noise ─┘ ⏱ milliseconds\n```\n\nThe noise input lets the model produce **diverse** designs for the same\nconditions — useful for exploring the design space. But the designs are only\n*approximate*: the network has to generalise from training examples rather than\nsolving the physics directly.\n\n**Key question:** How close can a learned generator get to the optimizer? That\nis what benchmarking measures." + ], + "id": "cell-4" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Imports" + ], + "id": "cell-5" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "j", + "s", + "o", + "n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "r", + "a", + "n", + "d", + "o", + "m", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "y", + "s", + ",", + " ", + "o", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "p", + "a", + "t", + "h", + "l", + "i", + "b", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "P", + "a", + "t", + "h", + "\n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "m", + "a", + "t", + "p", + "l", + "o", + "t", + "l", + "i", + "b", + ".", + "p", + "y", + "p", + "l", + "o", + "t", + " ", + "a", + "s", + " ", + "p", + "l", + "t", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "u", + "m", + "p", + "y", + " ", + "a", + "s", + " ", + "n", + "p", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "t", + "o", + "r", + "c", + "h", + " ", + "a", + "s", + " ", + "t", + "h", + "\n", + "\n", + "#", + " ", + "W", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + " ", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "(", + "v", + "i", + "s", + "u", + "a", + "l", + "i", + "z", + "a", + "t", + "i", + "o", + "n", + " ", + "+", + " ", + "t", + "r", + "a", + "i", + "n", + "i", + "n", + "g", + " ", + "u", + "t", + "i", + "l", + "i", + "t", + "i", + "e", + "s", + ")", + "\n", + "i", + "f", + " ", + "\"", + "g", + "o", + "o", + "g", + "l", + "e", + ".", + "c", + "o", + "l", + "a", + "b", + "\"", + " ", + "i", + "n", + " ", + "s", + "y", + "s", + ".", + "m", + "o", + "d", + "u", + "l", + "e", + "s", + ":", + "\n", + " ", + " ", + " ", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + 
"e", + "s", + "s", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "\"", + "/", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "_", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "o", + "s", + ".", + "m", + "a", + "k", + "e", + "d", + "i", + "r", + "s", + "(", + "_", + "u", + "t", + "i", + "l", + "s", + ",", + " ", + "e", + "x", + "i", + "s", + "t", + "_", + "o", + "k", + "=", + "T", + "r", + "u", + "e", + ")", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + " ", + "=", + " ", + "\"", + "c", + "o", + "d", + "e", + "x", + "/", + "d", + "c", + "c", + "2", + "6", + "-", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "-", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "a", + "s", + "e", + " ", + "=", + " ", + "f", + "\"", + "h", + "t", + "t", + "p", + "s", + ":", + "/", + "/", + "r", + "a", + "w", + ".", + "g", + "i", + "t", + "h", + "u", + "b", + "u", + "s", + "e", + "r", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + ".", + "c", + "o", + "m", + "/", + "I", + "D", + "E", + "A", + "L", + "L", + "a", + "b", + "/", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "/", + "{", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + "}", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "f", + "o", + "r", + " ", + "_", + "f", + " ", + "i", + "n", + " ", + "(", + "\"", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ".", + "p", + "y", + "\"", + ",", + " ", + "\"", + "_", + "_", + "i", + "n", + "i", + "t", + "_", + "_", + ".", + "p", + "y", + "\"", + ")", + ":", + "\n", + " ", + " ", + " 
", + " ", + " ", + " ", + " ", + " ", + "i", + "f", + " ", + "n", + "o", + "t", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "e", + "x", + "i", + "s", + "t", + "s", + "(", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ")", + ":", + "\n", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + "e", + "s", + "s", + ".", + "c", + "h", + "e", + "c", + "k", + "_", + "c", + "a", + "l", + "l", + "(", + "[", + "\"", + "w", + "g", + "e", + "t", + "\"", + ",", + " ", + "\"", + "-", + "q", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "b", + "a", + "s", + "e", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ",", + " ", + "\"", + "-", + "O", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + "]", + ")", + "\n", + "e", + "l", + "s", + "e", + ":", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "a", + "b", + "s", + "p", + "a", + "t", + "h", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "i", + "f", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "s", + "d", + "i", + "r", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "e", + "l", + "s", + "e", + " ", + "\"", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + "s", + "y", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "n", + "s", + "e", + "r", + "t", + "(", + "0", + ",", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + ")", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "o", + "t", + "e", + "b", + "o", 
+ "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "E", + "4", + "0", + "2", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + ".", + "r", + "e", + "l", + "o", + "a", + "d", + "(", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ")", + " ", + " ", + "#", + " ", + "a", + "l", + "w", + "a", + "y", + "s", + " ", + "p", + "i", + "c", + "k", + " ", + "u", + "p", + " ", + "l", + "a", + "t", + "e", + "s", + "t", + " ", + "e", + "d", + "i", + "t", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "*", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "F", + "4", + "0", + "1", + ",", + "F", + "4", + "0", + "3", + "\n", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "b", + "e", + "n", + "c", + "h", + ".", + "u", + "t", + "i", + "l", + "s", + ".", + "a", + "l", + "l", + "_", + "p", + "r", + "o", + "b", + "l", + "e", + "m", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "B", + "U", + "I", + "L", + "T", + "I", + "N", + "_", + "P", + "R", + "O", + "B", + "L", + "E", + "M", + "S", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "o", + "p", + "t", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r", + " ", + "a", + "s", + " ", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "C", + "N", + "N", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r" + ], + "id": "cell-6" + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## 2. Configuration\n\nAll tuneable knobs in one place. **Experiment with these** — especially\n`EPOCHS` and `N_TRAIN` — to see how they affect the generated designs." + ], + "id": "cell-7" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ---------- Reproducibility ----------\n", + "SEED = 7\n", + "\n", + "# ---------- Problem ----------\n", + "PROBLEM_ID = \"beams2d\" # Change to try a different EngiBench problem\n", + "\n", + "# ---------- Training ----------\n", + "EPOCHS = 15 # Short for workshop; try 50+ for better results\n", + "BATCH_SIZE = 64\n", + "LR = 2e-4 # Adam learning rate\n", + "LATENT_DIM = 32 # Size of random noise vector fed to generator\n", + "# ---------- Generation ----------\n", + "N_SAMPLES = 24 # Designs to generate for Notebook 02\n", + "\n", + "# ---------- Device ----------\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)\n", + "\n", + "if \"google.colab\" in sys.modules and not th.cuda.is_available():\n", + " print(\"\\n⚠️ WARNING: No GPU detected! 
Training will be very slow (~1 min/epoch).\")\n", + " print(\" Go to: Runtime → Change runtime type → T4 GPU → Save\")\n", + " print(\" Then re-run from the top.\\n\")\n", + "\n", + "# ---------- Artifact paths ----------\n", + "ARTIFACT_DIR = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "CKPT_PATH = ARTIFACT_DIR / \"engiopt_cgan2d_generator_supervised.pt\"\n", + "HISTORY_PATH = ARTIFACT_DIR / \"training_history.csv\"\n", + "TRAIN_CURVE_PATH = ARTIFACT_DIR / \"training_curve.png\"\n", + "\n", + "# ---------- Seed everything ----------\n", + "random.seed(SEED)\n", + "np.random.seed(SEED)\n", + "th.manual_seed(SEED)\n", + "if th.cuda.is_available():\n", + " th.cuda.manual_seed_all(SEED)\n", + "\n", + "print(\"Problem: \", PROBLEM_ID)\n", + "print(\"Artifact dir:\", ARTIFACT_DIR)" + ], + "id": "cell-8" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 3. Load the EngiBench problem\n\nSame API you used in Notebook 00 — every problem exposes `.dataset`,\n`.conditions_keys`, and `.design_space`." 
+ ], + "id": "cell-9" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\ntrain_ds = problem.dataset[\"train\"]\ntest_ds = problem.dataset[\"test\"]\n\ncondition_keys = problem.conditions_keys\ndesign_shape = problem.design_space.shape\nn_conds = len(condition_keys)\n\nprint(f\"Problem : {type(problem).__name__}\")\nprint(f\"Design shape : {design_shape}\")\nprint(f\"Condition keys : {condition_keys}\")\nprint(f\"Train examples : {len(train_ds)}\")\nprint(f\"Test examples : {len(test_ds)}\")" + ], + "id": "cell-10" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Quick look at a few training designs\nshow_design_gallery(problem.dataset, problem, n=4, seed=SEED)" + ], + "id": "cell-11" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 4. FILL-IN 01-A: Prepare training data\n\nThe EngiBench dataset stores conditions and designs as separate columns.\nTo train a neural network we need to extract them into numeric arrays:\n\n1. **Conditions**: a `(N, n_conds)` array of floats — one row per sample, one column per condition key\n2. **Designs**: a `(N, H, W)` array of pixel values\n\nWe use the **full training set** so the model sees as many examples as possible.\nWe also rescale designs from `[0, 1]` to `[-1, 1]` because the generator uses a\n`tanh` output layer (which naturally outputs that range)." + ], + "id": "cell-12" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-A\n# Goal: extract conditions and designs from the full EngiBench training set.\n\nrng = np.random.default_rng(SEED)\n\n# START FILL ---------------------------------------------------------------\n\n# 1. 
Stack all condition columns into one (N, n_conds) array\nconds_np = np.stack(\n [np.array(train_ds[k]).astype(np.float32) for k in condition_keys],\n axis=1,\n)\n\n# 2. Extract the optimal designs\ndesigns_np = np.array(train_ds[\"optimal_design\"]).astype(np.float32)\n\n# 3. Rescale designs from [0, 1] to [-1, 1]\ntargets_np = designs_np * 2.0 - 1.0\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nn_train = len(train_ds)\nassert conds_np.shape == (n_train, n_conds), (\n f\"Expected conditions shape ({n_train}, {n_conds}), got {conds_np.shape}\"\n)\nassert targets_np.shape == (n_train, *design_shape), (\n f\"Expected targets shape ({n_train}, {', '.join(map(str, design_shape))}), got {targets_np.shape}\"\n)\nassert targets_np.min() >= -1.0 and targets_np.max() <= 1.0, (\n f\"Targets should be in [-1, 1], got [{targets_np.min():.2f}, {targets_np.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: {n_train} samples, conditions {conds_np.shape}, targets {targets_np.shape}\")" + ], + "id": "cell-13" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 5. The Generator model\n\nWe use a **convolutional conditional generator** (cDCGAN) from EngiOpt. Unlike a\nsimple fully-connected network that treats the design as a flat vector of pixels,\nthis model uses **transposed convolutions** that upsample a small feature map\ninto a full-resolution design image — preserving spatial structure at every step.\n\n```\nnoise (32, 1, 1) ──► ConvT ──┐\n ├─► concat (256, 7, 7)\nconditions (4, 1, 1) ► ConvT ┘ │\n ▼\n ConvT 7×7 → 13×13\n ConvT 13×13 → 25×25\n ConvT 25×25 → 50×50\n ConvT 50×50 → 100×100 → resize → design\n```\n\nThis **convolutional inductive bias** is why CNN generators produce much sharper\ndesigns than MLP generators: each layer reasons about local spatial\nneighbourhoods rather than treating every pixel independently." 
+ ], + "id": "cell-14" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Wrap the CNN generator so it accepts flat (B, dim) inputs\nfrom notebook_helpers import WorkshopGenerator\n\ncnn_gen = EngiOptCNNGenerator(\n latent_dim=LATENT_DIM,\n n_conds=n_conds,\n design_shape=design_shape,\n)\nmodel = WorkshopGenerator(cnn_gen).to(DEVICE)\n\nn_params = sum(p.numel() for p in model.parameters())\nprint(f\"Generator created: {n_params:,} parameters\")\nprint(f\"Input: noise ({LATENT_DIM}) + conditions ({n_conds}) = {LATENT_DIM + n_conds}\")\nprint(f\"Output: {' x '.join(map(str, design_shape))} design image\")" + ], + "id": "cell-15" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 6. FILL-IN 01-B: Train the model\n\nTraining is **supervised**: for each sample, the model sees random noise +\nconditions and tries to reproduce the optimal design. The loss measures\npixel-by-pixel error (MSE).\n\nWe provide a `train_supervised_generator()` helper that handles the training\nloop. Your job: **call it with the right arguments and experiment with\nsettings.**\n\n> **Try it:** After training with the default 15 epochs, change `EPOCHS` to 20\n> or 50 in the config cell above, re-run from there, and see how the loss and\n> designs change." + ], + "id": "cell-16" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-B\n# Goal: train the generator. 
Experiment with EPOCHS and N_TRAIN.\n\n# Pick a few test conditions for snapshot visualization during training\nsnap_idx = rng.choice(len(test_ds), size=4, replace=False)\nsnap_conds = np.stack(\n [np.array(test_ds[k])[snap_idx].astype(np.float32) for k in condition_keys],\n axis=1,\n)\nsnap_baselines = np.array(test_ds[\"optimal_design\"])[snap_idx].astype(np.float32)\n\n# START FILL ---------------------------------------------------------------\n\n# Call the training helper. It returns {\"losses\": [...], \"snapshots\": [...]}.\ntrain_result = train_supervised_generator(\n model,\n conds_np,\n targets_np,\n latent_dim=LATENT_DIM,\n epochs=EPOCHS,\n batch_size=BATCH_SIZE,\n lr=LR,\n device=DEVICE,\n snapshot_conditions=snap_conds,\n snapshot_at_epochs=[1, max(1, EPOCHS // 2), EPOCHS],\n)\n\n# END FILL -----------------------------------------------------------------\n\ntrain_losses = train_result[\"losses\"]\nsnapshots = train_result[\"snapshots\"]\n\n# Save checkpoint\nth.save(model.state_dict(), CKPT_PATH)\n\n# CHECKPOINT\nassert len(train_losses) == EPOCHS, f\"Expected {EPOCHS} loss values, got {len(train_losses)}\"\nassert train_losses[-1] < train_losses[0], (\n \"Loss did not decrease — check your training arguments.\"\n)\nprint(f\"\\nCHECKPOINT passed: trained for {EPOCHS} epochs, final loss {train_losses[-1]:.6f}\")" + ], + "id": "cell-17" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training loss curve\n\nThe loss should decrease over epochs. A flat or increasing loss means something\nwent wrong. Note that even a decreasing loss does not guarantee good designs —\nMSE rewards blurry averages." 
+ ], + "id": "cell-18" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save training history\nimport pandas as pd\npd.DataFrame({\"epoch\": range(1, len(train_losses) + 1), \"loss\": train_losses}).to_csv(\n HISTORY_PATH, index=False,\n)\n\nshow_training_curve(train_losses, save_path=str(TRAIN_CURVE_PATH))" + ], + "id": "cell-19" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How the generator learns\n\nBelow you can see what the generator produces at different points during\ntraining. Early outputs are random noise; later outputs start to resemble beam\nstructures. The ground-truth row shows what the model is trying to match." + ], + "id": "cell-20" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_progression(snapshots, baseline_designs=snap_baselines, n_show=4)" + ], + "id": "cell-21" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 7. FILL-IN 01-C: Generate designs from test conditions\n\nNow for the payoff: use your trained model to produce designs for **conditions\nit has never seen** (from the held-out test set).\n\nIf the model generalises, it should produce reasonable designs for new\nconditions without running the optimizer. The `generate_designs()` helper\nhandles the inference — you just need to:\n\n1. Pick test conditions from the EngiBench dataset\n2. Call the generator\n3. Also extract the ground-truth baselines for comparison" + ], + "id": "cell-22" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-C\n# Goal: generate N_SAMPLES designs conditioned on test-set conditions.\n\n# START FILL ---------------------------------------------------------------\n\n# 1. Sample test indices\ntest_idx = rng.choice(len(test_ds), size=N_SAMPLES, replace=False)\n\n# 2. 
Extract test conditions and ground-truth baseline designs\ntest_conds_np = np.stack(\n [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n axis=1,\n)\nbaseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\n\n# 3. Generate designs using the trained model\ngen_designs = generate_designs(\n model, test_conds_np, latent_dim=LATENT_DIM, device=DEVICE,\n)\n\n# 4. Build condition records (list of dicts) for export\nconditions_records = [\n {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n for i in range(N_SAMPLES)\n]\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nassert gen_designs.shape == baseline_designs.shape, (\n f\"Shape mismatch: generated {gen_designs.shape} vs baseline {baseline_designs.shape}\"\n)\nassert len(conditions_records) == N_SAMPLES\nassert 0.0 <= gen_designs.min() and gen_designs.max() <= 1.0, (\n f\"Generated designs should be in [0, 1], got [{gen_designs.min():.2f}, {gen_designs.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: generated {N_SAMPLES} designs, shape {gen_designs.shape}\")" + ], + "id": "cell-23" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 8. Visual comparison: Generated vs Ground Truth\n\nEach column shows the same conditions. Top row = your model's output; bottom row\n= the optimizer's solution from the dataset.\n\n**What to look for:**\n- **Blurriness:** generated designs are often blurry because MSE loss averages\n over possible solutions\n- **Structure:** do the generated designs have recognisable beam topology (load\n paths, supports)?\n- **Condition sensitivity:** do different conditions produce visibly different\n designs, or does the model output the same thing regardless?" 
+ ], + "id": "cell-24" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_gen_vs_baseline(gen_designs, baseline_designs, conditions_records, condition_keys)" + ], + "id": "cell-25" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 9. Export artifacts for Notebook 02\n\nNotebook 02 needs three files to run its evaluation pipeline:\n- `generated_designs.npy` — your model's output\n- `baseline_designs.npy` — ground-truth designs from the dataset\n- `conditions.json` — the conditions used for generation" + ], + "id": "cell-26" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.save(ARTIFACT_DIR / \"generated_designs.npy\", gen_designs)\nnp.save(ARTIFACT_DIR / \"baseline_designs.npy\", baseline_designs)\nwith open(ARTIFACT_DIR / \"conditions.json\", \"w\") as f:\n json.dump(conditions_records, f, indent=2)\n\n# Verify\nrequired = [\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\"]\nmissing = [f for f in required if not (ARTIFACT_DIR / f).exists()]\nassert not missing, f\"Missing: {missing}\"\nprint(f\"Exported to {ARTIFACT_DIR}:\")\nfor f in required:\n print(f\" {f}\")" + ], + "id": "cell-27" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Discussion\n\n### What you have seen\n\nYou trained a neural network on a few hundred examples for a few epochs and used\nit to produce beam designs in milliseconds. The results are imperfect — and that\nis exactly the point.\n\n### Questions to think about\n\n1. **Why are the designs blurry?** MSE loss penalises pixel-wise error, which\n rewards the *average* of all plausible designs rather than any single sharp\n one. What alternative losses or model architectures might produce crisper\n output? (Think: adversarial loss, diffusion models, VAEs.)\n\n2. 
**Does the model respond to conditions?** Compare designs generated for very\n different volume fractions or load distributions. If they all look the same,\n the model may have learned the dataset mean rather than the\n condition → design relationship. What might help? (More training data? More\n epochs? A different architecture?)\n\n3. **From pixels to physics.** A design can *look* reasonable but fail under\n simulation — disconnected material, wrong volume fraction, stress\n concentrations. Notebook 02 will run the physics solver on your generated\n designs and quantify these failures.\n\n4. **The benchmarking motivation.** We do not know how bad these designs are\n until we *measure*. That is the role of a benchmark: providing standardised\n evaluation so we can compare methods, track progress, and avoid fooling\n ourselves with visual inspection alone.\n\n5. **What would you change?** If you had an hour instead of 30 minutes, what\n would you try? More data, more epochs, a different model, a different loss\n function? How would you decide whether it *actually* improved?" + ], + "id": "cell-28" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Next\n\nProceed to **Notebook 02** to evaluate your generated designs with physics-based\nsimulation and compute benchmark metrics. Your exported artifacts are the input." 
+ ], + "id": "cell-29" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/solutions/02_evaluate_metrics.ipynb b/workshops/dcc26/solutions/02_evaluate_metrics.ipynb new file mode 100644 index 0000000..47fd4c5 --- /dev/null +++ b/workshops/dcc26/solutions/02_evaluate_metrics.ipynb @@ -0,0 +1,1254 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 02 (Solution): Evaluate Your Generated Designs\n", + "\n", + "In Notebook 01 you trained a generative model and produced candidate beam designs.\n", + "Now comes the critical question: **are those designs actually any good?**\n", + "\n", + "In generative modeling for engineering, \"good\" is **not** a single number.\n", + "A design can look plausible yet fail simulation. It can perform well on one\n", + "objective yet violate a critical constraint. It can be high-quality but\n", + "identical to a training example -- memorised, not generalised.\n", + "\n", + "This notebook walks you through a **structured evaluation pipeline** that\n", + "diagnoses generative model quality from multiple complementary angles, each\n", + "revealing something the others miss." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A taxonomy of generative-model metrics\n", + "\n", + "```\n", + " How do we evaluate a generative model?\n", + " |\n", + " ┌───────────────┬───────────┼───────────────┬──────────────────┐\n", + " | | | | |\n", + " Visual Simulation Constraint Distributional Diversity &\n", + " Inspection Performance Satisfaction Similarity Coverage\n", + " | | | | |\n", + " \"Does it \"Does it \"Is it \"Does it \"Did we\n", + " look right?\" work?\" legal?\" match explore?\"\n", + " reality?\"\n", + " | | | | |\n", + " Residual Compliance Volfrac error MMD Pairwise L2\n", + " heatmaps histogram distribution (Gaussian DPP diversity\n", + " + scatter + feasibility kernel) NN novelty\n", + " + per-sample rate bars PCA embedding\n", + " gap bars\n", + "```\n", + "\n", + "No single metric tells the whole story. A model can ace one category and\n", + "fail another -- and *which failure matters most* depends on your application.\n", + "\n", + "| Category | Question | Beams2D metric | Why it matters |\n", + "|----------|----------|---------------|----------------|\n", + "| **Visual inspection** | Does it look like a real beam? | Residual heatmaps | Quick sanity check; catches gross failures |\n", + "| **Simulation performance** | Does the physics solver confirm it works? | Compliance gap vs baseline | The ground truth -- simulation is our oracle |\n", + "| **Constraint satisfaction** | Does it obey the engineering spec? | Volume fraction error | A stiff beam using too much material is invalid |\n", + "| **Distributional similarity** | Does the generator match the real data distribution? | MMD (Maximum Mean Discrepancy) | Detects mode collapse, unrealistic densities |\n", + "| **Diversity & coverage** | Did the model explore, or did it memorise? | Pairwise L2, DPP, NN novelty | A model outputting one beam 24 times is useless |\n", + "| **Optimization warmstarting** | Does it give the optimizer a head start? 
| IOG, COG, FOG | The ultimate downstream utility test |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The evaluation pipeline at a glance\n", + "\n", + "```\n", + "Generated Designs Baseline Designs Training Designs\n", + " | | |\n", + " v v v\n", + " [ Visual Inspection ] [ Reference set ]\n", + " | | |\n", + " v v |\n", + " [ Simulate ] [ Simulate ] |\n", + " | | |\n", + " v v |\n", + " Objectives Objectives |\n", + " \\ / |\n", + " \\ / |\n", + " v v |\n", + " Simulation Metrics |\n", + " (gap, improvement rate) |\n", + " | |\n", + " v v\n", + " Constraint Metrics Distributional Metrics\n", + " (volfrac error, feasibility) (MMD, pixel distributions)\n", + " | |\n", + " v v\n", + " Diversity Metrics <──────────────── PCA Embedding\n", + " (pairwise L2, DPP, NN novelty)\n", + " |\n", + " v\n", + " Optimization Warmstarting\n", + " (IOG, COG, FOG trajectories)\n", + " |\n", + " v\n", + " Summary Dashboard\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Notebook map\n", + "\n", + "| Part | What you do | Key output |\n", + "|------|-------------|------------|\n", + "| Setup | Install deps, load artifacts | `gen_designs`, `baseline_designs`, `conditions` |\n", + "| Part 1 | Visual inspection (the eye test) | Residual heatmaps |\n", + "| Part 2 (Fill-in 02-A) | Per-sample simulation | `results` DataFrame |\n", + "| Part 3 | Constraint satisfaction analysis | Volfrac scatter + error distribution |\n", + "| Part 4 (Fill-in 02-B) | Distributional similarity (MMD) | `mmd_value` |\n", + "| Part 5 | Diversity & coverage | Pairwise heatmap + PCA embedding |\n", + "| Part 6 | Optimization warmstarting (demo) | Trajectory plots with IOG/COG/FOG |\n", + "| Part 7 (Fill-in 02-C) | Comprehensive summary dashboard | `summary_df` |\n", + "\n", + "### Legend\n", + "- `PUBLIC FILL-IN CELL` -- you write code here.\n", + "- `CHECKPOINT` -- run this assertion block to verify before moving on.\n", + "- `# START FILL` / `# END FILL` -- your edits go between these markers." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 0: Install dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def pip_install(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " pip_install([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n", + " pip_install([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1: Load artifacts from Notebook 01\n", + "\n", + "We need three files that Notebook 01 exported:\n", + "- `generated_designs.npy` -- the designs your model produced\n", + "- `baseline_designs.npy` -- optimised reference designs from the dataset\n", + "- `conditions.json` -- the boundary-condition configs for each sample\n", + "\n", + "The next cell contains a recovery function that **automatically rebuilds** these\n", + "artifacts if they are missing (e.g., if you jumped straight to NB02). You do not\n", + "need to read or understand that function -- just run it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ── Artifact recovery (runs only if NB01 artifacts are missing) ──────────\n", + "# This cell auto-builds NB01 artifacts so NB02 works standalone.\n", + "# You do NOT need to read this code -- just run the cell.\n", + "\n", + "import importlib\n", + "import json, random, sys, os\n", + "from pathlib import Path\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch as th\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader, TensorDataset\n", + "\n", + "# Workshop helpers\n", + "if \"google.colab\" in sys.modules:\n", + " import subprocess as _sp\n", + " _utils = \"/content/workshop_utils\"\n", + " os.makedirs(_utils, exist_ok=True)\n", + " _branch = \"codex/dcc26-workshop-notebooks\"\n", + " _base = f\"https://raw.githubusercontent.com/IDEALLab/EngiOpt/{_branch}/workshops/dcc26/utils\"\n", + " for _f in (\"notebook_helpers.py\", \"__init__.py\"):\n", + " if not os.path.exists(f\"{_utils}/{_f}\"):\n", + " _sp.check_call([\"wget\", \"-q\", f\"{_base}/{_f}\", \"-O\", f\"{_utils}/{_f}\"])\n", + "else:\n", + " _utils = os.path.abspath(\"../utils\") if os.path.isdir(\"../utils\") else \"workshops/dcc26/utils\"\n", + "sys.path.insert(0, _utils)\n", + "import notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers) # always pick up latest edits\n", + "from notebook_helpers import * # noqa: F401,F403\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "PROBLEM_ID = \"beams2d\"\n", + "\n", + "try:\n", + " from engiopt.cgan_2d.cgan_2d import Generator as EngiOptCGAN2DGenerator\n", + "except ModuleNotFoundError as exc:\n", + " raise ModuleNotFoundError(\n", + " \"Could not import engiopt. 
Run the install cell first; on Colab, restart runtime after install.\"\n", + " ) from exc\n", + "\n", + "\n", + "def _resolve_artifact_dir(create=False):\n", + " p = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + " if create:\n", + " p.mkdir(parents=True, exist_ok=True)\n", + " return p\n", + "\n", + "\n", + "def _build_artifacts_locally(artifact_dir, seed=7, n_train=512, n_samples=24, epochs=8, batch_size=64, latent_dim=32):\n", + " \"\"\"Replicate the NB01 train+generate pipeline to produce evaluation artifacts.\"\"\"\n", + " print(\"Auto-building NB01 artifacts (this takes ~1 min)...\")\n", + " random.seed(seed); np.random.seed(seed); th.manual_seed(seed)\n", + " if th.cuda.is_available(): th.cuda.manual_seed_all(seed)\n", + " device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n", + " problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=seed)\n", + " train_ds, test_ds = problem.dataset[\"train\"], problem.dataset[\"test\"]\n", + " ckeys = problem.conditions_keys\n", + " rng = np.random.default_rng(seed)\n", + " idx = rng.choice(len(train_ds), size=min(n_train, len(train_ds)), replace=False)\n", + " conds = np.stack([np.array(train_ds[k])[idx].astype(np.float32) for k in ckeys], axis=1)\n", + " designs = np.array(train_ds[\"optimal_design\"])[idx].astype(np.float32)\n", + " targets = designs * 2.0 - 1.0\n", + " model = EngiOptCGAN2DGenerator(latent_dim=latent_dim, n_conds=conds.shape[1], design_shape=problem.design_space.shape).to(device)\n", + " opt = th.optim.Adam(model.parameters(), lr=1e-3)\n", + " crit = nn.MSELoss()\n", + " dl = DataLoader(TensorDataset(th.tensor(conds), th.tensor(targets)), batch_size=batch_size, shuffle=True)\n", + " losses = []\n", + " for ep in range(epochs):\n", + " model.train(); ep_loss = 0.0\n", + " for cb, tb in dl:\n", + " cb, tb = cb.to(device), tb.to(device)\n", + " pred = model(th.randn(cb.shape[0], latent_dim, device=device), cb)\n", + " loss = 
crit(pred, tb); opt.zero_grad(); loss.backward(); opt.step()\n", + " ep_loss += loss.item()\n", + " avg = ep_loss / len(dl); losses.append(avg)\n", + " print(f\" epoch {ep+1:02d}/{epochs} loss={avg:.4f}\")\n", + " sc = min(n_samples, len(test_ds))\n", + " sel = rng.choice(len(test_ds), size=sc, replace=False)\n", + " tc = np.stack([np.array(test_ds[k])[sel].astype(np.float32) for k in ckeys], axis=1)\n", + " bl = np.array(test_ds[\"optimal_design\"])[sel].astype(np.float32)\n", + " model.eval()\n", + " with th.no_grad():\n", + " out = model(th.randn(sc, latent_dim, device=device), th.tensor(tc, device=device))\n", + " gd = ((out.clamp(-1, 1) + 1) / 2).clamp(0, 1).cpu().numpy().astype(np.float32)\n", + " cond_recs = []\n", + " for i in range(sc):\n", + " rec = {}\n", + " for j, k in enumerate(ckeys):\n", + " rec[k] = bool(tc[i, j]) if k == \"overhang_constraint\" else float(tc[i, j])\n", + " cond_recs.append(rec)\n", + " artifact_dir.mkdir(parents=True, exist_ok=True)\n", + " np.save(artifact_dir / \"generated_designs.npy\", gd)\n", + " np.save(artifact_dir / \"baseline_designs.npy\", bl)\n", + " with open(artifact_dir / \"conditions.json\", \"w\") as f: json.dump(cond_recs, f, indent=2)\n", + " pd.DataFrame({\"epoch\": range(1, len(losses)+1), \"train_loss\": losses}).to_csv(artifact_dir / \"training_history.csv\", index=False)\n", + " th.save({\"model\": model.state_dict(), \"condition_keys\": ckeys, \"latent_dim\": latent_dim}, artifact_dir / \"engiopt_cgan2d_generator_supervised.pt\")\n", + " print(\"Artifacts ready at\", artifact_dir)\n", + "\n", + "\n", + "ARTIFACT_DIR = _resolve_artifact_dir(create=True)\n", + "_required = [ARTIFACT_DIR / f for f in (\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\")]\n", + "if not all(p.exists() for p in _required):\n", + " _build_artifacts_locally(ARTIFACT_DIR)\n", + "\n", + "print(\"Artifact directory:\", ARTIFACT_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, 
+ "outputs": [], + "source": [ + "# ── Load artifacts ───────────────────────────────────────────────────────\n", + "import json\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "from scipy.spatial.distance import cdist\n", + "\n", + "gen_designs = np.load(ARTIFACT_DIR / \"generated_designs.npy\")\n", + "baseline_designs = np.load(ARTIFACT_DIR / \"baseline_designs.npy\")\n", + "with open(ARTIFACT_DIR / \"conditions.json\") as f:\n", + " conditions = json.load(f)\n", + "\n", + "print(f\"Generated designs : {gen_designs.shape} (values in [{gen_designs.min():.2f}, {gen_designs.max():.2f}])\")\n", + "print(f\"Baseline designs : {baseline_designs.shape}\")\n", + "print(f\"Condition records : {len(conditions)}\")\n", + "print(f\"Condition keys : {list(conditions[0].keys())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load a reference subset of training designs for distributional + novelty metrics\n", + "problem_ref = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "train_designs_full = np.array(problem_ref.dataset[\"train\"][\"optimal_design\"]).astype(np.float32)\n", + "ref_idx = np.random.default_rng(7).choice(\n", + " len(train_designs_full), size=min(1024, len(train_designs_full)), replace=False\n", + ")\n", + "train_reference = train_designs_full[ref_idx]\n", + "print(f\"Training reference set: {train_reference.shape[0]} designs\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 1: Visual Inspection -- The Eye Test\n", + "\n", + "Before computing any metric, **look at the designs**. Visual inspection catches\n", + "gross failures immediately: is the model producing solid blocks? random noise?\n", + "something that looks vaguely beam-like?\n", + "\n", + "We show two views:\n", + "1. **Side-by-side gallery** -- generated vs optimised baseline\n", + "2.
**Pixel residual heatmaps** -- where exactly do the designs differ?\n", + "\n", + "Visual inspection is *necessary* but **not sufficient**. A design can look\n", + "plausible yet perform terribly in simulation, or violate constraints that\n", + "are invisible to the eye. The rest of this notebook quantifies what your\n", + "eyes cannot." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_residual_heatmaps(gen_designs, baseline_designs, n_show=6)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Takeaway:** The residual heatmaps reveal where the generator struggles most.\n", + "Bright regions = large pixel error. Notice how errors tend to cluster at\n", + "structural boundaries and fine features -- exactly the details that matter\n", + "most for physical performance.\n", + "\n", + "But pixels alone don't tell us about *compliance*, *constraint violations*, or\n", + "*diversity*. We need simulation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 2: Simulation Performance -- \"Does it work?\"\n", + "\n", + "The **physics simulator** is our oracle. For Beams2D, it computes the\n", + "*compliance* of each design under the given boundary conditions:\n", + "- **Lower compliance = stiffer beam = better design**\n", + "\n", + "We simulate both the generated design and its corresponding baseline\n", + "(the optimised design from the dataset) under **identical conditions**.\n", + "The difference tells us how far the generator is from optimal.\n", + "\n", + "> **Analogy:** Imagine you asked an architecture student to sketch a bridge.\n", + "> Visual inspection tells you the sketch looks bridge-like. But only a\n", + "> structural engineer (our simulator) can tell you whether it would actually\n", + "> stand up." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "\n", + "# Feasibility tolerance: how close must volfrac be to the target?\n", + "VOLFRAC_TOL = 0.05\n", + "\n", + "# PUBLIC FILL-IN CELL 02-A\n", + "# Goal: build a list of dicts, one per sample, with objective + feasibility info.\n", + "#\n", + "# For each sample i, you have:\n", + "# g = gen_designs[i] -- generated design (2D numpy array)\n", + "# b = baseline_designs[i] -- baseline design (2D numpy array)\n", + "# cfg = conditions[i] -- dict with keys like 'volfrac', 'rmin', etc.\n", + "\n", + "rows = []\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "for i in range(len(gen_designs)):\n", + " g = gen_designs[i]\n", + " b = baseline_designs[i]\n", + " cfg = dict(conditions[i])\n", + "\n", + " # 1) Compute volume fractions (mean pixel value of each design)\n", + " g_vf = float(np.mean(g))\n", + " b_vf = float(np.mean(b))\n", + " target_vf = cfg[\"volfrac\"]\n", + "\n", + " # 2) Check feasibility: is |actual_vf - target_vf| <= VOLFRAC_TOL?\n", + " g_feasible = bool(abs(g_vf - target_vf) <= VOLFRAC_TOL)\n", + " b_feasible = bool(abs(b_vf - target_vf) <= VOLFRAC_TOL)\n", + "\n", + " # 3) Simulate both designs under identical conditions\n", + " problem.reset(seed=7 + i)\n", + " g_obj = float(problem.simulate(design=g, config=cfg)[0])\n", + " problem.reset(seed=7 + i)\n", + " b_obj = float(problem.simulate(design=b, config=cfg)[0])\n", + "\n", + " # 4) Record everything\n", + " rows.append({\n", + " \"sample\": i,\n", + " \"gen_obj\": g_obj,\n", + " \"base_obj\": b_obj,\n", + " \"gen_minus_base\": g_obj - b_obj,\n", + " \"gen_volfrac\": g_vf,\n", + " \"target_volfrac\": target_vf,\n", + " \"gen_feasible\": g_feasible,\n", + " \"base_feasible\": b_feasible,\n", + " })\n", + "# END FILL -----------------------------------------------------------------\n", 
+ "\n", + "results = pd.DataFrame(rows)\n", + "results.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-A\n", + "expected_cols = {\"sample\", \"gen_obj\", \"base_obj\", \"gen_minus_base\", \"gen_volfrac\",\n", + " \"target_volfrac\", \"gen_feasible\", \"base_feasible\"}\n", + "missing_cols = expected_cols - set(results.columns)\n", + "assert not missing_cols, f\"Missing columns: {missing_cols}\"\n", + "assert len(results) == len(gen_designs), f\"Expected {len(gen_designs)} rows, got {len(results)}\"\n", + "assert results[\"gen_obj\"].notna().all(), \"gen_obj contains NaN -- did you forget to simulate?\"\n", + "assert results[\"gen_feasible\"].dtype == bool, \"gen_feasible should be boolean\"\n", + "print(f\"Checkpoint 02-A passed: {len(results)} samples evaluated.\")\n", + "print(f\" Feasible generated: {results['gen_feasible'].sum()}/{len(results)}\")\n", + "print(f\" Feasible baseline: {results['base_feasible'].sum()}/{len(results)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualising simulation results\n", + "\n", + "Three complementary views:\n", + "1. **Histogram** -- overall distribution of objectives (generated vs baseline)\n", + "2. **Scatter plot** -- per-sample pairing (points below diagonal = generated is better)\n", + "3. 
**Residual bar chart** -- per-sample gap, signed (green = generated outperforms)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_comparison(results)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_residuals(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the simulation results\n", + "\n", + "- **Histogram overlap**: If the blue (generated) and orange (baseline) distributions\n", + " overlap heavily, the generator is competitive. If blue is shifted right (higher\n", + " compliance), the generator produces weaker designs.\n", + "\n", + "- **Scatter diagonal**: Points *below* the diagonal line mean the generated design\n", + " outperformed the optimised baseline for that sample -- a strong result.\n", + "\n", + "- **Residual bars**: The bar chart makes the per-sample gap immediately visible.\n", + " Consistent green bars = the model is competitive. Large red bars = specific\n", + " failure modes worth investigating (check the design images for those samples)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 3: Constraint Satisfaction -- \"Is it legal?\"\n", + "\n", + "A design that performs well but **violates constraints** is useless in practice.\n", + "For Beams2D, the key constraint is **volume fraction**: the design must use\n", + "a specific amount of material (neither too much nor too little).\n", + "\n", + "> **Analogy:** An architect who designs a beautiful building that exceeds the\n", + "> budget by 50% has not solved the problem -- they have created a new one.\n", + "\n", + "We already computed `gen_volfrac` and `target_volfrac` in the simulation loop.\n", + "Now let's visualise how well the generator satisfies this constraint." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_volfrac_analysis(results, volfrac_tol=VOLFRAC_TOL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_feasibility_bars(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the constraint results\n", + "\n", + "- **Left scatter**: Points near the diagonal are feasible; points far from it\n", + " are violating the volume fraction constraint. The green band shows the\n", + " tolerance window.\n", + "\n", + "- **Error histogram**: A narrow distribution centered at zero means the generator\n", + " has learned to control material usage. A wide or biased distribution suggests\n", + " the model ignores the volume fraction condition.\n", + "\n", + "- **Feasibility rate**: The bar chart gives the bottom line. If the baseline\n", + " achieves ~100% feasibility but the generator is at 50%, there is a clear\n", + " conditioning failure.\n", + "\n", + "**Why this matters beyond beams:** In real engineering, constraints can be\n", + "stress limits, manufacturing tolerances, thermal budgets, or regulatory\n", + "requirements. A generative model that ignores constraints generates\n", + "*interesting but unusable* designs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 4: Distributional Similarity -- \"Does it match reality?\"\n", + "\n", + "The previous metrics evaluated designs **individually** (per-sample objective,\n", + "per-sample feasibility). But we also need to ask: does the *distribution*\n", + "of generated designs match the distribution of the ground-truth optimal designs\n", + "**for the same conditions**?\n", + "\n", + "### What is MMD?\n", + "\n", + "**Maximum Mean Discrepancy (MMD)** is a kernel-based distance between two\n", + "distributions. Intuitively:\n", + "\n", + "1. 
Map each design into a high-dimensional feature space via a Gaussian kernel\n", + "2. Compare the *mean embeddings* of the two sets\n", + "3. If the means match, the distributions are similar; if they diverge, they are different\n", + "\n", + "$$\\text{MMD}^2 = \\underbrace{\\mathbb{E}[k(x, x')]}_{\\text{gen-gen similarity}} + \\underbrace{\\mathbb{E}[k(y, y')]}_{\\text{base-base similarity}} - 2\\,\\underbrace{\\mathbb{E}[k(x, y)]}_{\\text{cross similarity}}$$\n", + "\n", + "- **MMD = 0**: generated and baseline distributions are identical\n", + "- **MMD > 0**: they differ (larger = more different)\n", + "- The kernel bandwidth $\\sigma$ controls the scale of comparison\n", + "\n", + "### Why compare generated vs baseline?\n", + "\n", + "Our generator is **conditional** -- it takes test conditions and produces\n", + "designs. The baseline contains the ground-truth optima for those *same*\n", + "test conditions. Comparing generated vs baseline directly measures whether\n", + "the generator has learned to produce the right designs for the right conditions.\n", + "\n", + "### Choosing sigma without test-data leakage\n", + "\n", + "The Gaussian kernel bandwidth $\\sigma$ determines what scale of difference\n", + "the kernel is sensitive to. We set it using the **median heuristic** on the\n", + "*training data only* -- the median pairwise distance among training designs.\n", + "This avoids leaking test information into the metric while ensuring the\n", + "kernel operates in a meaningful range.\n", + "\n", + "### Why MMD and not just \"average quality\"?\n", + "\n", + "A model could produce 24 copies of the single best design. Per-sample metrics\n", + "would look great! But the *distribution* would be nothing like the diverse\n", + "baseline set. MMD catches this." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visual intuition: where does each set place material?\n", + "show_spatial_distribution_comparison(gen_designs, baseline_designs, train_reference)\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-B\n", + "# Goal: compute MMD between generated designs and baseline designs (same conditions).\n", + "#\n", + "# MMD uses a Gaussian (RBF) kernel: k(x,y) = exp(-||x-y||^2 / (2*sigma^2))\n", + "#\n", + "# We set sigma from the TRAINING data (median heuristic) to avoid test-data leakage,\n", + "# then apply that fixed sigma to the gen-vs-baseline comparison.\n", + "#\n", + "# You have:\n", + "# gen_designs -- (N, H, W) numpy array of generated designs\n", + "# baseline_designs -- (N, H, W) numpy array of optimized designs (same conditions)\n", + "# train_reference -- (M, H, W) numpy array of training designs\n", + "# cdist -- from scipy.spatial.distance (already imported)\n", + "#\n", + "# Steps:\n", + "# 1. Flatten all design sets to 2D\n", + "# 2. Compute sigma from training pairwise distances (median heuristic)\n", + "# 3. Compute pairwise squared distances between generated and baseline\n", + "# 4. Apply Gaussian kernel: K = exp(-D / (2 * sigma^2))\n", + "# 5. MMD = mean(K_gg) + mean(K_bb) - 2 * mean(K_gb)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1. Flatten\n", + "gen_flat = gen_designs.reshape(gen_designs.shape[0], -1)\n", + "base_flat = baseline_designs.reshape(baseline_designs.shape[0], -1)\n", + "ref_flat = train_reference.reshape(train_reference.shape[0], -1)\n", + "\n", + "# 2. 
Sigma from training data only (no test leakage)\n", + "D_ref = cdist(ref_flat, ref_flat, \"sqeuclidean\")\n", + "sigma = float(np.sqrt(np.median(D_ref)))\n", + "print(f\"Sigma (median heuristic on training data): {sigma:.2f}\")\n", + "\n", + "# 3. Pairwise squared distances for gen vs baseline\n", + "D_gg = cdist(gen_flat, gen_flat, \"sqeuclidean\")\n", + "D_bb = cdist(base_flat, base_flat, \"sqeuclidean\")\n", + "D_gb = cdist(gen_flat, base_flat, \"sqeuclidean\")\n", + "\n", + "# 4. Gaussian kernel\n", + "K_gg = np.exp(-D_gg / (2 * sigma**2))\n", + "K_bb = np.exp(-D_bb / (2 * sigma**2))\n", + "K_gb = np.exp(-D_gb / (2 * sigma**2))\n", + "\n", + "# 5. MMD\n", + "mmd_value = float(K_gg.mean() + K_bb.mean() - 2 * K_gb.mean())\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"MMD(generated, baseline) = {mmd_value:.6f}\")\n", + "print(f\" K_gg mean (gen-gen similarity): {K_gg.mean():.6f}\")\n", + "print(f\" K_bb mean (base-base similarity): {K_bb.mean():.6f}\")\n", + "print(f\" K_gb mean (cross similarity): {K_gb.mean():.6f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-B\n", + "assert mmd_value is not None, \"mmd_value is None -- did you compute it?\"\n", + "assert isinstance(mmd_value, float), \"mmd_value should be a float\"\n", + "assert mmd_value >= 0, f\"MMD should be non-negative, got {mmd_value}\"\n", + "assert sigma is not None and sigma > 1, f\"sigma should be > 1 for 10k-dim data (got {sigma}); did you use the median heuristic?\"\n", + "assert K_gg is not None and K_gb is not None, \"Kernel matrices not computed\"\n", + "print(f\"Checkpoint 02-B passed: MMD = {mmd_value:.6f} (sigma = {sigma:.2f})\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Put MMD in context: compare against reference points (same training-derived sigma)\n", + "#\n", + "# 
1. Training sample vs baseline: what you'd get by grabbing random training\n", + "# designs instead of conditioning on the test conditions.\n", + "# 2. Random noise vs baseline: worst case (meaningless generator).\n", + "\n", + "rng_mmd = np.random.default_rng(42)\n", + "\n", + "# Training sample vs baseline (no conditioning)\n", + "train_sample_idx = rng_mmd.choice(len(train_reference), size=len(baseline_designs), replace=False)\n", + "train_sample_flat = train_reference[train_sample_idx].reshape(len(baseline_designs), -1)\n", + "D_tt = cdist(train_sample_flat, train_sample_flat, \"sqeuclidean\")\n", + "D_tb = cdist(train_sample_flat, base_flat, \"sqeuclidean\")\n", + "mmd_train_base = float(\n", + " np.exp(-D_tt / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_tb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "# Random noise vs baseline\n", + "random_designs = rng_mmd.random(gen_designs.shape).astype(np.float32)\n", + "rand_flat = random_designs.reshape(random_designs.shape[0], -1)\n", + "D_rr = cdist(rand_flat, rand_flat, \"sqeuclidean\")\n", + "D_rb = cdist(rand_flat, base_flat, \"sqeuclidean\")\n", + "mmd_random_base = float(\n", + " np.exp(-D_rr / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_rb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "print(f\"MMD reference points (sigma={sigma:.2f}, from training data):\")\n", + "print(f\" Generated vs Baseline: {mmd_value:.6f} (our model)\")\n", + "print(f\" Train sample vs Baseline: {mmd_train_base:.6f} (no conditioning)\")\n", + "print(f\" Random noise vs Baseline: {mmd_random_base:.6f} (worst case)\")\n", + "\n", + "show_mmd_comparison_bar(mmd_value, mmd_train_base, mmd_random_base)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PCA embedding: where do generated designs live relative to training data?\n", + "show_embedding_scatter(gen_designs, baseline_designs, train_reference)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the distributional similarity results\n", + "\n", + "- **Mean design images**: If the generated mean image looks similar to the\n", + " baseline mean image, the model has learned where material should go on\n", + " average for these conditions. Differences reveal spatial biases.\n", + "\n", + "- **Volume fraction distributions**: If the generated distribution is narrower\n", + " or shifted relative to baseline, the model isn't capturing the full range\n", + " of volume fractions needed for these test conditions.\n", + "\n", + "- **MMD in context**: The comparison bar chart places the generator's MMD on\n", + " a meaningful scale:\n", + " - **Train sample vs Baseline** (retrieval baseline): What you'd get by\n", + " grabbing random training designs instead of conditioning. If the\n", + " generator beats this, it has genuinely learned to condition.\n", + " - **Random vs Baseline** (worst case): Uniform noise -- the floor for\n", + " a non-functional generator.\n", + " A generator close to zero has matched the baseline distribution. A\n", + " generator near the train-sample bar is no better than memorising\n", + " training data without using the conditions.\n", + "\n", + "- **PCA embedding**: If generated designs (blue) cluster tightly in one\n", + " corner while training data (grey) spans a wide region, the model has\n", + " **mode collapse**. Ideally, blue points should overlap with the orange\n", + " baseline points (same conditions) while spanning a similar spread." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 5: Diversity & Coverage -- \"Did we explore?\"\n", + "\n", + "A generative model should produce **varied** designs, not 24 copies of the same\n", + "beam. 
We measure two complementary aspects:\n", + "\n", + "### Diversity (intra-set variation)\n", + "How different are the generated designs from *each other*?\n", + "- **Pairwise L2 distance**: Average Euclidean distance between all pairs of\n", + " generated designs. Higher = more diverse.\n", + "- **DPP diversity**: Determinantal Point Process log-determinant of the\n", + " similarity matrix. Captures both volume and spread of the set.\n", + "\n", + "### Novelty (distance to training data)\n", + "How different are the generated designs from the *training set*?\n", + "- **Nearest-neighbour distance**: For each generated design, find the closest\n", + " training example. If NN distance is near zero, the model may be memorising.\n", + " Higher = more novel.\n", + "\n", + "> **The diversity-quality trade-off:** A model that generates random noise\n", + "> would score very high on diversity but terribly on quality. We want designs\n", + "> that are diverse *and* feasible *and* performant. This is the fundamental\n", + "> tension in generative model evaluation." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pairwise_distance_heatmap(gen_designs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Compute diversity and novelty metrics\n", + "diversity_l2 = mean_pairwise_l2(gen_designs)\n", + "novelty_nn = mean_nn_distance_to_reference(gen_designs, train_reference)\n", + "\n", + "# Also compute for baseline as a reference point\n", + "baseline_diversity = mean_pairwise_l2(baseline_designs)\n", + "baseline_novelty = mean_nn_distance_to_reference(baseline_designs, train_reference)\n", + "\n", + "print(\"Diversity (mean pairwise L2):\")\n", + "print(f\" Generated: {diversity_l2:.2f}\")\n", + "print(f\" Baseline: {baseline_diversity:.2f}\")\n", + "print()\n", + "print(\"Novelty (mean NN distance to training):\")\n", + "print(f\" Generated: {novelty_nn:.2f}\")\n", + "print(f\" Baseline: {baseline_novelty:.2f}\")\n", + "print()\n", + "if diversity_l2 < baseline_diversity * 0.5:\n", + " print(\"Warning: Generated diversity is much lower than baseline -- possible mode collapse.\")\n", + "elif diversity_l2 > baseline_diversity * 1.5:\n", + " print(\"Note: Generated diversity exceeds baseline -- check if the extra variation is meaningful.\")\n", + "else:\n", + " print(\"Generated diversity is comparable to baseline diversity.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the diversity results\n", + "\n", + "- **Pairwise heatmap**: Uniform warm colours = good diversity (all designs differ\n", + " from each other). A block of cool/dark colours = a cluster of near-identical\n", + " designs (partial mode collapse).\n", + "\n", + "- **Diversity vs baseline**: The baseline designs come from an optimiser run on\n", + " diverse conditions, so they naturally vary. 
If the generator's diversity is\n", + " much lower, it is producing less variety than the problem demands.\n", + "\n", + "- **Novelty**: Very low NN distance means the generator is reproducing training\n", + " examples almost exactly. Some proximity is expected (it learned from them),\n", + " but near-zero distance suggests memorisation rather than generalisation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 6: Optimization Warmstarting -- \"Does it speed up search?\"\n", + "\n", + "The **ultimate downstream test** for a generative model in engineering design:\n", + "if we use its output as a *starting point* for topology optimisation, does the\n", + "optimiser converge faster or find better solutions than starting from scratch?\n", + "\n", + "### The optimality gap metrics\n", + "\n", + "Starting from a generated design, we run the problem's optimiser and track the\n", + "objective at each step:\n", + "\n", + "- **IOG (Initial Optimality Gap)** = objective at step 0 minus baseline optimum.\n", + " *How good is the starting point?*\n", + "\n", + "- **FOG (Final Optimality Gap)** = objective at final step minus baseline optimum.\n", + " *How good is the final result?*\n", + "\n", + "- **COG (Cumulative Optimality Gap)** = sum of all per-step gaps.\n", + " *How much total \"wasted effort\" occurred across the trajectory?*\n", + " The shaded area in the trajectory plot.\n", + "\n", + "```\n", + "Objective\n", + " ^\n", + " | * IOG = obj[0] - baseline\n", + " | \\ *\n", + " | \\ * * Shaded area = COG\n", + " | \\ * * *\n", + " | ─ ─ ─ ─ ─ ─ ─ ─ FOG = obj[-1] - baseline\n", + " | - - - - - - - - - - - ← baseline (optimised reference)\n", + " └────────────────────────> Step\n", + "```\n", + "\n", + "- IOG < 0 is ideal: the generated design is *already better* than the baseline\n", + "- FOG ≈ 0: the optimiser recovers to baseline quality regardless of start\n", + "- Small COG: the optimiser converges quickly from this 
warmstart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DEMO: Optimization warmstarting on a small subset (3 samples)\n", + "# This runs the EngiBench optimiser from each generated design and tracks the trajectory.\n", + "# We use only 3 samples because optimization is slower than simulation.\n", + "\n", + "problem_opt = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "n_opt_demo = min(3, len(gen_designs))\n", + "opt_data = []\n", + "\n", + "for i in range(n_opt_demo):\n", + " cfg = dict(conditions[i])\n", + "\n", + " # Run optimiser from generated design\n", + " problem_opt.reset(seed=7 + i)\n", + " _, opt_history = problem_opt.optimize(gen_designs[i], config=cfg)\n", + "\n", + " # Get baseline objective for reference\n", + " problem_opt.reset(seed=7 + i)\n", + " base_obj = float(problem_opt.simulate(baseline_designs[i], config=cfg)[0])\n", + "\n", + " # Extract objective trajectory\n", + " obj_trajectory = [float(step.obj_values) for step in opt_history]\n", + "\n", + " opt_data.append({\n", + " \"sample_idx\": i,\n", + " \"obj_trajectory\": obj_trajectory,\n", + " \"base_obj\": base_obj,\n", + " })\n", + " iog = obj_trajectory[0] - base_obj\n", + " fog = obj_trajectory[-1] - base_obj\n", + " cog = sum(o - base_obj for o in obj_trajectory)\n", + " print(f\"Sample {i}: IOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f} ({len(opt_history)} steps)\")\n", + "\n", + "print(f\"\\nOptimization complete for {n_opt_demo} samples.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_optimization_trajectories(opt_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the optimization trajectories\n", + "\n", + "- **Steep drop at step 0→1**: The generated design was far from optimal, but the\n", + " optimiser quickly improved it. 
This still counts as a useful warmstart if\n", + " the total trajectory (COG) is shorter than starting from scratch.\n", + "\n", + "- **Flat trajectory near baseline**: The generated design was already near-optimal\n", + " and the optimiser had little work to do. Best-case scenario.\n", + "\n", + "- **Trajectory above baseline throughout**: The generated design was so far from\n", + " optimal that even after optimisation it never reached baseline quality. This\n", + " suggests the model is producing designs in the wrong region of design space.\n", + "\n", + "**In practice**, you would run this on many more samples and average the IOG/COG/FOG\n", + "to get statistically robust estimates. For the workshop, 3 samples illustrate\n", + "the concept." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 7: Putting It All Together\n", + "\n", + "Now we aggregate all the metrics from Parts 2-6 into a single summary table.\n", + "This is the kind of table you would report in a paper or use to compare\n", + "different generative models." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-C\n", + "# Goal: build a comprehensive summary dict and wrap it in a DataFrame.\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Compute average IOG/FOG/COG from the optimization demo\n", + "avg_iog = float(np.mean([d[\"obj_trajectory\"][0] - d[\"base_obj\"] for d in opt_data]))\n", + "avg_fog = float(np.mean([d[\"obj_trajectory\"][-1] - d[\"base_obj\"] for d in opt_data]))\n", + "avg_cog = float(np.mean([sum(o - d[\"base_obj\"] for o in d[\"obj_trajectory\"]) for d in opt_data]))\n", + "\n", + "summary = {\n", + " # Simulation performance\n", + " \"n_samples\": len(results),\n", + " \"gen_obj_mean\": float(results[\"gen_obj\"].mean()),\n", + " \"base_obj_mean\": float(results[\"base_obj\"].mean()),\n", + " \"objective_gap_mean\": float(results[\"gen_minus_base\"].mean()),\n", + " \"improvement_rate\": float((results[\"gen_obj\"] < results[\"base_obj\"]).mean()),\n", + " # Constraint satisfaction\n", + " \"gen_feasible_rate\": float(results[\"gen_feasible\"].mean()),\n", + " \"base_feasible_rate\": float(results[\"base_feasible\"].mean()),\n", + " \"gen_violation_ratio\": float((~results[\"gen_feasible\"]).mean()),\n", + " \"base_violation_ratio\": float((~results[\"base_feasible\"]).mean()),\n", + " # Distributional similarity\n", + " \"mmd\": mmd_value,\n", + " # Diversity & novelty\n", + " \"gen_diversity_l2\": diversity_l2,\n", + " \"gen_novelty_to_train_l2\": novelty_nn,\n", + " # Optimization warmstarting (from demo subset)\n", + " \"avg_iog\": avg_iog,\n", + " \"avg_fog\": avg_fog,\n", + " \"avg_cog\": avg_cog,\n", + "}\n", + "\n", + "summary_df = pd.DataFrame([summary])\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "# Show the summary transposed for readability (one metric per row)\n", + 
"display(summary_df.T.rename(columns={0: \"value\"}))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-C\n", + "assert \"summary_df\" in dir() and summary_df is not None, \"Define summary_df\"\n", + "assert len(summary_df) == 1, \"summary_df should have exactly one row\"\n", + "required_keys = {\n", + " \"n_samples\", \"gen_obj_mean\", \"base_obj_mean\", \"objective_gap_mean\",\n", + " \"improvement_rate\", \"gen_feasible_rate\", \"base_feasible_rate\",\n", + " \"gen_violation_ratio\", \"base_violation_ratio\",\n", + " \"mmd\", \"gen_diversity_l2\", \"gen_novelty_to_train_l2\",\n", + " \"avg_iog\", \"avg_fog\", \"avg_cog\",\n", + "}\n", + "missing = required_keys - set(summary_df.columns)\n", + "assert not missing, f\"Missing summary columns: {missing}\"\n", + "assert summary_df[\"gen_obj_mean\"].notna().all(), \"gen_obj_mean is NaN\"\n", + "print(\"Checkpoint 02-C passed: comprehensive summary table is complete.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_metric_summary_dashboard(summary)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Export artifacts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_path = ARTIFACT_DIR / \"per_sample_metrics.csv\"\n", + "summary_path = ARTIFACT_DIR / \"metrics_summary.csv\"\n", + "\n", + "results.to_csv(results_path, index=False)\n", + "summary_df.to_csv(summary_path, index=False)\n", + "\n", + "# Save objective histogram\n", + "hist_path = ARTIFACT_DIR / \"objective_histogram.png\"\n", + "fig, ax = plt.subplots(figsize=(7, 4))\n", + "ax.hist(results[\"gen_obj\"], bins=10, alpha=0.7, label=\"Generated\", color=\"#4C72B0\")\n", + "ax.hist(results[\"base_obj\"], bins=10, alpha=0.7, label=\"Baseline\", color=\"#DD8452\")\n", + "ax.set_xlabel(\"Compliance 
(lower is better)\")\n", + "ax.set_ylabel(\"Count\")\n", + "ax.set_title(\"Generated vs baseline objective distribution\")\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "fig.savefig(hist_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save scatter plot\n", + "scatter_path = ARTIFACT_DIR / \"objective_scatter.png\"\n", + "fig, ax = plt.subplots(figsize=(5, 5))\n", + "ax.scatter(results[\"base_obj\"], results[\"gen_obj\"], alpha=0.8)\n", + "lo = min(results[\"base_obj\"].min(), results[\"gen_obj\"].min()) * 0.9\n", + "hi = max(results[\"base_obj\"].max(), results[\"gen_obj\"].max()) * 1.1\n", + "ax.plot([lo, hi], [lo, hi], \"--\", color=\"gray\", linewidth=1)\n", + "ax.set_xlabel(\"Baseline compliance\")\n", + "ax.set_ylabel(\"Generated compliance\")\n", + "ax.set_title(\"Per-sample objective comparison\")\n", + "fig.tight_layout()\n", + "fig.savefig(scatter_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save design grid\n", + "grid_path = ARTIFACT_DIR / \"design_grid.png\"\n", + "fig, axes_grid = plt.subplots(2, min(6, len(gen_designs)), figsize=(14, 5))\n", + "for i in range(min(6, len(gen_designs))):\n", + " axes_grid[0, i].imshow(gen_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[0, i].set_title(f\"gen {i}\", fontsize=9)\n", + " axes_grid[0, i].axis(\"off\")\n", + " axes_grid[1, i].imshow(baseline_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[1, i].set_title(f\"base {i}\", fontsize=9)\n", + " axes_grid[1, i].axis(\"off\")\n", + "fig.tight_layout()\n", + "fig.savefig(grid_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "print(\"Exported:\")\n", + "for p in [results_path, summary_path, hist_path, scatter_path, grid_path]:\n", + " print(f\" {p}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Discussion prompts\n", + "\n", + "Use these questions to prepare for the workshop breakout discussion. 
There are no\n", + "\"right\" answers -- the goal is to develop your own informed perspective.\n", + "\n", + "1. **Which metric category matters most for your domain?** In safety-critical\n", + " applications (aerospace, medical devices), constraint satisfaction is a hard\n", + " requirement. In early-stage concept exploration, diversity might matter more.\n", + " What about your own research area?\n", + "\n", + "2. **When do metrics disagree?** A model might score well on MMD (distributional\n", + " match) but poorly on per-sample objective (simulation performance). What does\n", + " that disagreement tell you? Which metric would you trust more?\n", + "\n", + "3. **Is diversity always good?** A model that produces wildly different designs\n", + " scores high on diversity -- but some of those designs might be nonsensical.\n", + " When does high diversity indicate a problem rather than a strength?\n", + "\n", + "4. **The warmstarting test.** If a model's IOG is poor (bad starting points) but\n", + " FOG is near zero (optimiser recovers), is the model useful? What if IOG is\n", + " great but the optimiser diverges (FOG increases)?\n", + "\n", + "5. **When would you trust these results for a paper?** We evaluated 24 samples\n", + " with a model trained for 8 epochs on 512 examples. What would need to change\n", + " to make these numbers publication-ready? (Think: sample size, training budget,\n", + " statistical significance, multiple seeds.)\n", + "\n", + "6. **Objective vs feasibility trade-off.** If your model produces designs with\n", + " great compliance but poor volume-fraction adherence, is that progress or a\n", + " failure? How would you communicate this nuance in a benchmark table?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection: what did you learn in NB02?\n", + "\n", + "Before closing, write down your answers to these prompts:\n", + "\n", + "1. 
**What do the metrics tell you about your model?** Look at your summary table.\n", + " Where does the generator excel, and where does it fall short? Which metric\n", + " surprised you most?\n", + "\n", + "2. **Which visualisation was most informative?** Was it the residual heatmaps,\n", + " the PCA embedding, the optimization trajectories, or something else? Why?\n", + "\n", + "3. **What would a full benchmark study add?** A complete EngiBench evaluation\n", + " would test across multiple problems, multiple seeds, larger sample sizes, and\n", + " the full metric suite (MMD, DPP, IOG/COG/FOG, violation ratio). How would\n", + " that change your confidence in the conclusions?\n", + "\n", + "4. **How would you improve the generator?** Based on the diagnostic pattern you\n", + " see (which categories are strong vs weak), what would you change about the\n", + " model architecture, training procedure, or data pipeline?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Troubleshooting\n", + "\n", + "If a section fails, do not continue downstream. Fix the failing cell first, then\n", + "rerun it and its checkpoint before moving on. The notebook is staged so that\n", + "failures are localised." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb b/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb new file mode 100644 index 0000000..86c1b18 --- /dev/null +++ b/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb @@ -0,0 +1,790 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 03 (Solution): Add a New Problem to EngiBench\n", + "\n", + "**Time budget: ~25 minutes** | 3 fill-in exercises | Mostly guided walkthrough\n", + "\n", + "In this notebook you will see how to wrap a **new simulator** as an EngiBench `Problem`,\n", + "so that every model in EngiOpt can immediately train on it with zero code changes.\n", + "\n", + "We will build a **planar 2-link robot manipulator co-design problem**: choose link\n", + "lengths, motor strength, and control gains so the arm reaches a target with minimal\n", + "tracking error and energy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Notebook map\n", + "\n", + "This notebook is a **guided walkthrough** with 3 small fill-in exercises.\n", + "Most code is pre-written -- your job is to **read, run, and understand** the\n", + "EngiBench Problem contract, then fill in 3 targeted methods.\n", + "\n", + "### Public exercise legend\n", + "- `PUBLIC FILL-IN CELL`: implement this method (skeleton + hints provided).\n", + "- `CHECKPOINT`: run and verify before continuing.\n", + "- Pre-written cells: read and run -- these are fully working code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The problem: Planar manipulator co-design\n", + "\n", + "Imagine a simple robot arm bolted to a table. It has **two rigid links**\n", + "connected by revolute joints, and it needs to reach a target point in 2D space.\n", + "\n", + "```\n", + " target\n", + " X (target_x, target_y)\n", + " /\n", + " / link 2 (length l2)\n", + " /\n", + " joint 2\n", + " /\n", + " / link 1 (length l1)\n", + " /\n", + " joint 1\n", + " *------------ table / base\n", + "```\n", + "\n", + "**What we design** (the design vector, 6 variables):\n", + "\n", + "| Index | Variable | Range | Meaning |\n", + "|-------|----------|-------|---------|\n", + "| 0 | `link1_m` | 0.25 -- 1.00 | Length of link 1 (meters) |\n", + "| 1 | `link2_m` | 0.20 -- 0.95 | Length of link 2 (meters) |\n", + "| 2 | `motor_strength` | 2.0 -- 30.0 | Motor torque multiplier |\n", + "| 3 | `kp` | 5.0 -- 120.0 | Proportional control gain |\n", + "| 4 | `kd` | 0.2 -- 18.0 | Derivative control gain |\n", + "| 5 | `damping` | 0.0 -- 1.5 | Joint damping coefficient |\n", + "\n", + "**Conditions** (set by the environment, not the designer):\n", + "- `target_x`, `target_y`: where the arm must reach\n", + "- `payload_kg`: mass at the end-effector\n", + "- `disturbance_scale`: random torque noise during simulation\n", + "\n", + "**Objectives** (both minimized):\n", + "1. 
`final_tracking_error_m`: how far the end-effector is from the target at the end\n", + "2. `actuation_energy_j`: total energy spent by the motors\n", + "\n", + "**Why this is a co-design problem**: we are simultaneously choosing the *hardware*\n", + "(link lengths, motor) and the *controller* (gains, damping). This is exactly the\n", + "kind of coupled design problem where generative models can help explore the space." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The EngiBench Problem contract\n", + "\n", + "Every problem in EngiBench implements the same interface. This is what makes it\n", + "possible to train **any** EngiOpt model on **any** problem with zero model code changes.\n", + "\n", + "The key pieces:\n", + "\n", + "| Attribute / Method | Purpose |\n", + "|---|---|\n", + "| `design_space` | A `gymnasium.spaces.Box` defining valid designs |\n", + "| `objectives` | Tuple of `(name, direction)` pairs |\n", + "| `conditions` | Dataclass of environmental conditions |\n", + "| `design_constraints` | List of constraint functions |\n", + "| `check_constraints(design, config)` | Returns list of violations (empty = feasible) |\n", + "| `simulate(design, config)` | Runs the simulator, returns objective values |\n", + "| `optimize(start, config)` | Simple optimizer, returns `(best_design, history)` |\n", + "| `render(design)` | Visualization for human inspection |\n", + "| `random_design()` | Sample a random valid design |\n", + "\n", + "In this notebook, most of these are **pre-written**. You will fill in 3 methods\n", + "that test your understanding of the contract." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "\n", + "def pip_install(packages: list[str]):\n", + " cmd = [sys.executable, \"-m\", \"pip\", \"install\", *packages]\n", + " print(\"Running:\", \" \".join(cmd))\n", + " subprocess.check_call(cmd)\n", + "\n", + "\n", + "BASE_PACKAGES = [\"engibench[all]\", \"matplotlib\", \"gymnasium\", \"pybullet\"]\n", + "ENGIOPT_GIT = \"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " print(\"Installing dependencies...\")\n", + " pip_install(BASE_PACKAGES)\n", + " pip_install([ENGIOPT_GIT])\n", + "\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + "\n", + " print(\"Dependency install complete.\")\n", + "else:\n", + " print(\"Skipping install (using current environment). Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1 -- Imports\n", + "\n", + "These are the EngiBench building blocks we need to define a Problem." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Annotated\n", + "\n", + "import numpy as np\n", + "from gymnasium import spaces\n", + "\n", + "from engibench.constraint import bounded\n", + "from engibench.constraint import constraint\n", + "from engibench.core import ObjectiveDirection\n", + "from engibench.core import OptiStep\n", + "from engibench.core import Problem\n", + "\n", + "import pybullet as p" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2 -- Build the Problem class (guided walkthrough + 3 fill-ins)\n", + "\n", + "The cell below contains the **complete** `PlanarManipulatorCoDesignProblem` class.\n", + "Most methods are pre-written and working. **Three methods** are left for you to fill in.\n", + "\n", + "Read through the pre-written code to understand the structure, then complete:\n", + "\n", + "1. **Fill-in 03-A** (`simulate`): Merge config, clip design to bounds, call the rollout. A short wrapper method.\n", + "2. **Fill-in 03-B** (`random_design`): Sample a design from the design space. Essentially a one-liner.\n", + "3. **Fill-in 03-C** (`optimize`): Wire up a simple random-perturbation search loop using the hints provided.\n", + "\n", + "The pre-written methods handle all the PyBullet complexity -- you do NOT need to\n", + "understand robotics or physics simulation to complete the exercises." 
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Pre-written methods tour (read before filling in)\n",
+ "\n",
+ "Here is a quick guide to the pre-written methods you will see in the class:\n",
+ "\n",
+ "- **`__init__`**: Sets up the design space (6-dim Box), conditions, and constraints.\n",
+ "- **`_build_robot`**: Creates a 2-link arm in PyBullet with configurable link lengths and damping.\n",
+ "- **`_inverse_kinematics_2link`**: Given a target (x, y), computes the joint angles using the law of cosines. Standard closed-form 2-link IK.\n",
+ "- **`_forward_kinematics_2link`**: Given joint angles, computes end-effector (x, y). Simple trig.\n",
+ "- **`_rollout`**: Runs the full PyBullet simulation -- sets up PD control to track the target, applies disturbances, records tracking error and energy at each step.\n",
+ "- **`optimize`** (your Fill-in 03-C, shown completed in this solutions notebook): Random search over the design space -- tries perturbations, keeps the best.\n",
+ "- **`render`**: 4-panel matplotlib figure showing design variables, end-effector path, tracking error, and joint torques."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class PlanarManipulatorCoDesignProblem(Problem[np.ndarray]):\n", + " \"\"\"Robotics co-design problem: choose arm geometry + controller to reach a target.\n", + "\n", + " This wraps a PyBullet physics simulation as an EngiBench Problem so that\n", + " any EngiOpt generative model can train on it.\n", + " \"\"\"\n", + "\n", + " version = 0\n", + " objectives = (\n", + " (\"final_tracking_error_m\", ObjectiveDirection.MINIMIZE),\n", + " (\"actuation_energy_j\", ObjectiveDirection.MINIMIZE),\n", + " )\n", + "\n", + " @dataclass\n", + " class Conditions:\n", + " target_x: Annotated[float, bounded(lower=0.20, upper=1.35)] = 0.85\n", + " target_y: Annotated[float, bounded(lower=0.05, upper=1.20)] = 0.45\n", + " payload_kg: Annotated[float, bounded(lower=0.0, upper=2.0)] = 0.8\n", + " disturbance_scale: Annotated[float, bounded(lower=0.0, upper=0.30)] = 0.05\n", + "\n", + " @dataclass\n", + " class Config(Conditions):\n", + " sim_steps: Annotated[int, bounded(lower=60, upper=1200)] = 240\n", + " dt: Annotated[float, bounded(lower=1e-4, upper=0.05)] = 1.0 / 120.0\n", + " torque_limit: Annotated[float, bounded(lower=1.0, upper=50.0)] = 12.0\n", + " max_iter: Annotated[int, bounded(lower=1, upper=300)] = 60\n", + "\n", + " dataset_id = \"IDEALLab/planar_manipulator_codesign_v0\" # placeholder\n", + " container_id = None\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # __init__ (pre-written)\n", + " # ------------------------------------------------------------------ #\n", + " def __init__(self, seed: int = 0, **kwargs):\n", + " super().__init__(seed=seed)\n", + " self.config = self.Config(**kwargs)\n", + " self.conditions = self.Conditions(\n", + " target_x=self.config.target_x,\n", + " target_y=self.config.target_y,\n", + " payload_kg=self.config.payload_kg,\n", + " 
disturbance_scale=self.config.disturbance_scale,\n", + " )\n", + "\n", + " # Design vector = [link1_m, link2_m, motor_strength, kp, kd, damping]\n", + " self.design_space = spaces.Box(\n", + " low=np.array([0.25, 0.20, 2.0, 5.0, 0.2, 0.0], dtype=np.float32),\n", + " high=np.array([1.00, 0.95, 30.0, 120.0, 18.0, 1.5], dtype=np.float32),\n", + " dtype=np.float32,\n", + " )\n", + "\n", + " # --- Constraints ------------------------------------------------\n", + " # These use the @constraint decorator from EngiBench.\n", + " # A constraint function receives (design, **config_kwargs).\n", + " # It should ASSERT what must be true. If the assert fails,\n", + " # check_constraints() catches it and reports a violation.\n", + "\n", + " @constraint\n", + " def reachable_workspace(design: np.ndarray, target_x: float, target_y: float, **_) -> None:\n", + " l1, l2 = float(design[0]), float(design[1])\n", + " r = float(np.sqrt(target_x**2 + target_y**2))\n", + " assert l1 + l2 >= r + 0.03, f\"target radius {r:.3f} exceeds reach {l1 + l2:.3f}\"\n", + "\n", + " @constraint\n", + " def gain_consistency(design: np.ndarray, **_) -> None:\n", + " kp, kd = float(design[3]), float(design[4])\n", + " assert kd <= 2.2 * np.sqrt(max(kp, 1e-6)), f\"kd={kd:.3f} too high for kp={kp:.3f}\"\n", + "\n", + " self.design_constraints = [reachable_workspace, gain_consistency]\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _build_robot (pre-written -- PyBullet internals)\n", + " # ------------------------------------------------------------------ #\n", + " def _build_robot(self, l1: float, l2: float, payload_kg: float, damping: float) -> tuple[int, int]:\n", + " \"\"\"Create a 2-link planar arm in PyBullet. 
Returns (robot_id, ee_link_index).\"\"\"\n", + " p.resetSimulation()\n", + " p.setGravity(0, 0, -9.81)\n", + "\n", + " link_masses = [0.5 + 0.2 * payload_kg, 0.35 + 0.25 * payload_kg]\n", + " link_collision = [-1, -1]\n", + " link_visual = [\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.025, length=l1, rgbaColor=[0.2, 0.5, 0.9, 1.0]),\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.020, length=l2, rgbaColor=[0.9, 0.4, 0.2, 1.0]),\n", + " ]\n", + " qx = p.getQuaternionFromEuler([0.0, np.pi / 2.0, 0.0])\n", + "\n", + " robot = p.createMultiBody(\n", + " baseMass=0.0,\n", + " baseCollisionShapeIndex=-1,\n", + " baseVisualShapeIndex=-1,\n", + " basePosition=[0, 0, 0],\n", + " linkMasses=link_masses,\n", + " linkCollisionShapeIndices=link_collision,\n", + " linkVisualShapeIndices=link_visual,\n", + " linkPositions=[[0, 0, 0], [l1, 0, 0]],\n", + " linkOrientations=[qx, qx],\n", + " linkInertialFramePositions=[[l1 / 2.0, 0, 0], [l2 / 2.0, 0, 0]],\n", + " linkInertialFrameOrientations=[[0, 0, 0, 1], [0, 0, 0, 1]],\n", + " linkParentIndices=[0, 1],\n", + " linkJointTypes=[p.JOINT_REVOLUTE, p.JOINT_REVOLUTE],\n", + " linkJointAxis=[[0, 0, 1], [0, 0, 1]],\n", + " )\n", + "\n", + " for j in [0, 1]:\n", + " p.changeDynamics(robot, j, linearDamping=0.0, angularDamping=float(damping))\n", + "\n", + " return robot, 1\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _inverse_kinematics_2link (pre-written -- standard 2-link IK)\n", + " # ------------------------------------------------------------------ #\n", + " def _inverse_kinematics_2link(self, x: float, y: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Closed-form IK for a 2-link planar arm using the law of cosines.\"\"\"\n", + " r2 = x * x + y * y\n", + " c2 = (r2 - l1 * l1 - l2 * l2) / (2.0 * l1 * l2)\n", + " c2 = float(np.clip(c2, -1.0, 1.0))\n", + " s2 = float(np.sqrt(max(0.0, 1.0 - c2 * c2)))\n", + " q2 = float(np.arctan2(s2, c2))\n", + " q1 = 
float(np.arctan2(y, x) - np.arctan2(l2 * s2, l1 + l2 * c2))\n", + " return q1, q2\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _forward_kinematics_2link (pre-written -- simple trig)\n", + " # ------------------------------------------------------------------ #\n", + " def _forward_kinematics_2link(self, q1: float, q2: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Compute end-effector (x, y) from joint angles and link lengths.\"\"\"\n", + " x = l1 * np.cos(q1) + l2 * np.cos(q1 + q2)\n", + " y = l1 * np.sin(q1) + l2 * np.sin(q1 + q2)\n", + " return float(x), float(y)\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _rollout (pre-written -- runs the full PyBullet simulation)\n", + " # ------------------------------------------------------------------ #\n", + " def _rollout(self, design: np.ndarray, cfg: dict, return_trace: bool = False):\n", + " \"\"\"Run PyBullet simulation with PD control. 
Returns objective vector.\"\"\"\n", + " l1, l2, motor_strength, kp, kd, damping = [float(v) for v in design]\n", + "\n", + " cid = p.connect(p.DIRECT)\n", + " try:\n", + " robot, _ = self._build_robot(l1, l2, cfg[\"payload_kg\"], damping)\n", + " q1_t, q2_t = self._inverse_kinematics_2link(cfg[\"target_x\"], cfg[\"target_y\"], l1, l2)\n", + "\n", + " err_trace = []\n", + " tau_trace = []\n", + " ee_trace = []\n", + " energy = 0.0\n", + "\n", + " for _step in range(int(cfg[\"sim_steps\"])):\n", + " for j, q_t in enumerate([q1_t, q2_t]):\n", + " p.setJointMotorControl2(\n", + " bodyUniqueId=robot,\n", + " jointIndex=j,\n", + " controlMode=p.POSITION_CONTROL,\n", + " targetPosition=q_t,\n", + " positionGain=float(kp) / 120.0,\n", + " velocityGain=float(kd) / 50.0,\n", + " force=float(cfg[\"torque_limit\"]) * float(motor_strength),\n", + " )\n", + "\n", + " if cfg[\"disturbance_scale\"] > 0:\n", + " disturb = self.np_random.normal(0.0, cfg[\"disturbance_scale\"], size=2)\n", + " p.applyExternalTorque(robot, 0, [0, 0, float(disturb[0])], p.LINK_FRAME)\n", + " p.applyExternalTorque(robot, 1, [0, 0, float(disturb[1])], p.LINK_FRAME)\n", + "\n", + " p.stepSimulation()\n", + "\n", + " js0 = p.getJointState(robot, 0)\n", + " js1 = p.getJointState(robot, 1)\n", + " q1, q2 = float(js0[0]), float(js1[0])\n", + " dq1, dq2 = float(js0[1]), float(js1[1])\n", + " tau1, tau2 = float(js0[3]), float(js1[3])\n", + "\n", + " ee_x, ee_y = self._forward_kinematics_2link(q1, q2, l1, l2)\n", + " err = float(np.sqrt((ee_x - cfg[\"target_x\"]) ** 2 + (ee_y - cfg[\"target_y\"]) ** 2))\n", + "\n", + " err_trace.append(err)\n", + " tau_trace.append((tau1, tau2))\n", + " ee_trace.append((ee_x, ee_y))\n", + " energy += (abs(tau1 * dq1) + abs(tau2 * dq2)) * float(cfg[\"dt\"])\n", + "\n", + " final_error = float(err_trace[-1])\n", + " obj = np.array([final_error, float(energy)], dtype=np.float32)\n", + "\n", + " if return_trace:\n", + " trace = {\n", + " \"ee_trace\": np.array(ee_trace, 
dtype=np.float32),\n", + " \"err_trace\": np.array(err_trace, dtype=np.float32),\n", + " \"tau_trace\": np.array(tau_trace, dtype=np.float32),\n", + " \"target\": np.array([cfg[\"target_x\"], cfg[\"target_y\"]], dtype=np.float32),\n", + " \"design\": np.array(design, dtype=np.float32),\n", + " \"objectives\": obj,\n", + " }\n", + " return obj, trace\n", + "\n", + " return obj\n", + " finally:\n", + " p.disconnect(cid)\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-A: simulate\n", + " # ================================================================== #\n", + " def simulate(self, design: np.ndarray, config: dict | None = None) -> np.ndarray:\n", + " \"\"\"Run the simulator and return objective values.\n", + "\n", + " This is the main entry point that EngiOpt models call.\n", + " It should:\n", + " 1. Merge self.config defaults with any overrides from `config`\n", + " 2. Clip the design to the valid bounds\n", + " 3. Call self._rollout() and return the result\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " clipped = np.clip(design, self.design_space.low, self.design_space.high)\n", + " return self._rollout(clipped, cfg, return_trace=False)\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-B: random_design\n", + " # ================================================================== #\n", + " # Note: check_constraints() is inherited from Problem. 
It calls each\n", + " # function in self.design_constraints and collects assertion failures.\n", + " # The constraints are defined in __init__ above -- look at them!\n", + " #\n", + " # This fill-in is about random_design(), which is used by the optimizer\n", + " # and by dataset generation to sample starting points.\n", + "\n", + " def random_design(self):\n", + " \"\"\"Return (design, reward) where design is sampled uniformly from bounds.\n", + "\n", + " Convention: reward = -1 (dummy value, since we have not simulated yet).\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " design = self.np_random.uniform(self.design_space.low, self.design_space.high).astype(np.float32)\n", + " return design, -1.0\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-C: optimize\n", + " # ================================================================== #\n", + " def optimize(self, starting_point: np.ndarray, config: dict | None = None):\n", + " \"\"\"Simple random-perturbation optimizer.\n", + "\n", + " Returns (best_design, history) where history is a list of OptiStep.\n", + " Each OptiStep records the best objective values seen so far at that step.\n", + "\n", + " Algorithm:\n", + " 1. Start from starting_point, evaluate it\n", + " 2. For each iteration: perturb the best design with Gaussian noise,\n", + " clip to bounds, check constraints, simulate, keep if better\n", + " 3. 
\"Better\" = lower score, where score = error + 0.02 * energy\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " x = np.clip(starting_point, self.design_space.low, self.design_space.high).astype(np.float32)\n", + " best = x.copy()\n", + " best_obj = self.simulate(best, cfg)\n", + " best_score = float(best_obj[0] + 0.02 * best_obj[1])\n", + " history = [OptiStep(obj_values=best_obj, step=0)]\n", + " step_scale = np.array([0.05, 0.05, 2.5, 8.0, 1.2, 0.08], dtype=np.float32)\n", + "\n", + " for i in range(int(cfg.get(\"max_iter\", 60))):\n", + " candidate = best + self.np_random.normal(size=best.shape).astype(np.float32) * step_scale\n", + " candidate = np.clip(candidate, self.design_space.low, self.design_space.high)\n", + " violations = self.check_constraints(candidate, config=cfg)\n", + " if violations:\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + " continue\n", + " cand_obj = self.simulate(candidate, cfg)\n", + " cand_score = float(cand_obj[0] + 0.02 * cand_obj[1])\n", + " if cand_score < best_score:\n", + " best = candidate.copy()\n", + " best_obj = cand_obj\n", + " best_score = cand_score\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + "\n", + " return best, history\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # render (pre-written -- 4-panel visualization)\n", + " # ------------------------------------------------------------------ #\n", + " def render(self, design: np.ndarray, *, open_window: bool = False):\n", + " \"\"\"Create a 4-panel diagnostic figure for a given design.\"\"\"\n", + " import matplotlib.pyplot as plt\n", + "\n", + " cfg = self.config.__dict__\n", + " x = np.clip(design.astype(np.float32), self.design_space.low, self.design_space.high)\n", + " obj, trace = 
self._rollout(x, cfg, return_trace=True)\n", + "\n", + " ee = trace[\"ee_trace\"]\n", + " err = trace[\"err_trace\"]\n", + " target = trace[\"target\"]\n", + " tau = trace[\"tau_trace\"]\n", + "\n", + " fig, axes = plt.subplots(1, 4, figsize=(17, 4.2))\n", + "\n", + " labels = [\"link1\", \"link2\", \"motor\", \"kp\", \"kd\", \"damping\"]\n", + " axes[0].bar(labels, x, color=[\"#4c78a8\", \"#4c78a8\", \"#f58518\", \"#54a24b\", \"#e45756\", \"#72b7b2\"])\n", + " axes[0].set_title(\"Design variables\")\n", + " axes[0].tick_params(axis=\"x\", rotation=35)\n", + "\n", + " axes[1].plot(ee[:, 0], ee[:, 1], lw=2, label=\"end-effector path\")\n", + " axes[1].scatter([target[0]], [target[1]], c=\"red\", marker=\"x\", s=70, label=\"target\")\n", + " r = x[0] + x[1]\n", + " circle = plt.Circle((0, 0), r, color=\"gray\", fill=False, linestyle=\"--\", alpha=0.5)\n", + " axes[1].add_patch(circle)\n", + " axes[1].set_aspect(\"equal\", \"box\")\n", + " axes[1].set_title(\"Task-space trajectory\")\n", + " axes[1].set_xlabel(\"x [m]\")\n", + " axes[1].set_ylabel(\"y [m]\")\n", + " axes[1].legend(fontsize=8)\n", + "\n", + " axes[2].plot(err, color=\"#e45756\")\n", + " axes[2].set_title(\"Tracking error over time\")\n", + " axes[2].set_xlabel(\"step\")\n", + " axes[2].set_ylabel(\"error [m]\")\n", + " axes[2].grid(alpha=0.3)\n", + "\n", + " axes[3].plot(np.abs(tau[:, 0]), label=\"|tau1|\")\n", + " axes[3].plot(np.abs(tau[:, 1]), label=\"|tau2|\")\n", + " axes[3].set_title(\"Actuation effort\")\n", + " axes[3].set_xlabel(\"step\")\n", + " axes[3].set_ylabel(\"torque [Nm]\")\n", + " axes[3].legend(fontsize=8)\n", + " axes[3].grid(alpha=0.3)\n", + "\n", + " fig.suptitle(\n", + " f\"Objectives: final_error={obj[0]:.4f} m, energy={obj[1]:.3f} J\",\n", + " y=1.03,\n", + " )\n", + " fig.tight_layout()\n", + "\n", + " if open_window:\n", + " plt.show()\n", + " return fig, axes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CHECKPOINT: Quick sanity check before 
the smoke test\n", + "\n", + "Run this cell to verify the class can be instantiated and the pre-written\n", + "parts work. This does NOT require your fill-ins yet." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT -- class instantiation (does not call your fill-ins)\n", + "prob_test = PlanarManipulatorCoDesignProblem(seed=0)\n", + "print(\"design_space:\", prob_test.design_space)\n", + "print(\"objectives:\", prob_test.objectives)\n", + "print(\"num constraints:\", len(prob_test.design_constraints))\n", + "print(\"conditions:\", prob_test.conditions)\n", + "print()\n", + "print(\"Class instantiation OK.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3 -- Smoke test\n", + "\n", + "Run this after completing **all 3 fill-ins** above.\n", + "\n", + "What success looks like:\n", + "- Non-empty optimization history\n", + "- Finite objective values (no NaN or Inf)\n", + "- A 4-panel figure renders without error\n", + "\n", + "**How to read the 4-panel figure**: Inspect the panels for (1) design parameter\n", + "values, (2) the end-effector path in task space with the target marked,\n", + "(3) tracking error decreasing over simulation steps, and (4) joint torque\n", + "profiles showing actuation effort." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Smoke test (run after implementing all 3 PUBLIC FILL-IN blocks)\n", + "problem = PlanarManipulatorCoDesignProblem(\n", + " seed=42,\n", + " target_x=0.9,\n", + " target_y=0.45,\n", + " payload_kg=0.8,\n", + " disturbance_scale=0.04,\n", + " sim_steps=220,\n", + " max_iter=40,\n", + ")\n", + "start, _ = problem.random_design()\n", + "\n", + "cfg = {\n", + " \"target_x\": 0.9,\n", + " \"target_y\": 0.45,\n", + " \"payload_kg\": 0.8,\n", + " \"disturbance_scale\": 0.04,\n", + " \"sim_steps\": 220,\n", + " \"dt\": 1.0 / 120.0,\n", + " \"torque_limit\": 12.0,\n", + " \"max_iter\": 40,\n", + "}\n", + "\n", + "print(\"design space:\", problem.design_space)\n", + "print(\"objectives:\", problem.objectives)\n", + "print(\"conditions:\", problem.conditions)\n", + "\n", + "viol = problem.check_constraints(start, config=cfg)\n", + "print(\"constraint violations:\", len(viol))\n", + "\n", + "obj0 = problem.simulate(start, config=cfg)\n", + "opt_design, history = problem.optimize(start, config=cfg)\n", + "objf = problem.simulate(opt_design, config=cfg)\n", + "\n", + "print(\"initial objectives [tracking_error_m, energy_J]:\", obj0.tolist())\n", + "print(\"final objectives [tracking_error_m, energy_J]:\", objf.tolist())\n", + "print(\"optimization steps:\", len(history))\n", + "\n", + "# CHECKPOINT\n", + "assert len(history) > 0, \"Optimization history should not be empty\"\n", + "assert np.all(np.isfinite(obj0)), \"Initial objective contains non-finite values\"\n", + "assert np.all(np.isfinite(objf)), \"Final objective contains non-finite values\"\n", + "print(\"All assertions passed.\")\n", + "\n", + "problem.render(opt_design)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The power of a standardized interface\n", + "\n", + "Notice what just happened: we wrapped a completely new simulator (PyBullet robotics)\n", + "as an EngiBench 
`Problem`, and it exposes the same interface as `beams2d`,\n", + "`heatconduction2d`, or any other problem in the benchmark.\n", + "\n", + "This means that **every generative model in EngiOpt** -- the CGAN you trained in\n", + "Notebook 01, the diffusion models, the VAEs -- could be trained on this manipulator\n", + "problem **with zero model code changes**. You would only need to point the training\n", + "script at the new problem ID.\n", + "\n", + "That is the core value proposition of EngiBench: **decouple the problem from the\n", + "method** so researchers can focus on one or the other without rewriting glue code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Contributing to EngiBench: what you need\n", + "\n", + "If you have an engineering problem from your own domain that you would like to\n", + "contribute to the benchmark, here is the checklist:\n", + "\n", + "1. **Design space**: Define a `gymnasium.spaces.Box` (or `Dict`) for the design variables, with physically meaningful bounds.\n", + "\n", + "2. **Simulator**: Implement `simulate(design, config) -> objective_values`. This is the core -- it maps a design to measurable performance. Must be deterministic for a given seed.\n", + "\n", + "3. **Constraints**: Define constraint functions using the `@constraint` decorator. Each should `assert` what must be true for a design to be feasible.\n", + "\n", + "4. **Dataset**: Generate a dataset of (design, conditions, objectives) tuples and host it on HuggingFace. This is what generative models train on.\n", + "\n", + "5. **Render method**: A visualization that helps humans interpret designs. Not strictly required for training, but essential for debugging and papers.\n", + "\n", + "6. 
**Metadata**: Version number, objective names and directions, condition ranges, and a docstring explaining the problem physics.\n", + "\n", + "See the [EngiBench contribution guide](https://github.com/IDEALLab/EngiBench) for the full template and review process." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Takeaways\n", + "\n", + "Before closing, reflect on these questions:\n", + "\n", + "1. **What are the minimum requirements** for adding a new problem to EngiBench? Which methods and attributes are essential vs. nice-to-have?\n", + "\n", + "2. **Which part of the Problem interface** was most intuitive? Which was least intuitive? (For example: design_space, constraints, simulate, render, optimize...)\n", + "\n", + "3. **What engineering problem from YOUR domain** could you contribute as a benchmark? What would the design vector look like? What would you simulate?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optional extension -- Train an EngiOpt model on this problem\n", + "\n", + "The solutions notebook contains a full optional extension that:\n", + "\n", + "1. Generates a feasible dataset from simulator rollouts\n", + "2. Trains `engiopt.cgan_1d` (the same model architecture from Notebook 01) on the manipulator problem\n", + "3. Compares generated designs vs. a random baseline\n", + "\n", + "This demonstrates the key point: because our manipulator problem uses the standard\n", + "EngiBench interface, we can reuse EngiOpt model code directly.\n", + "\n", + "To try it yourself, see the **solutions notebook**:\n", + "`workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb`\n", + "\n", + "The essential idea in ~10 lines of pseudocode:\n", + "\n", + "```python\n", + "# 1. 
Generate dataset\n", + "for _ in range(N_SAMPLES):\n", + " design, _ = problem.random_design()\n", + " if problem.check_constraints(design, cfg) == []:\n", + " obj = problem.simulate(design, cfg)\n", + " dataset.append((design, conditions, obj))\n", + "\n", + "# 2. Train CGAN on top-performing designs\n", + "generator = cgan1d.Generator(latent_dim=8, n_conds=4, design_shape=(6,), ...)\n", + "# ... standard GAN training loop ...\n", + "\n", + "# 3. Generate + evaluate\n", + "new_design = generator(z, conditions)\n", + "obj = problem.simulate(new_design, cfg) # same interface!\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Troubleshooting\n", + "\n", + "- **`NotImplementedError`**: You have not yet filled in one of the 3 exercises. Check `simulate()`, `random_design()`, and `optimize()`.\n", + "- **`AssertionError` in smoke test**: Your fill-in runs but produces incorrect values. Re-read the hints in the `# START FILL` block.\n", + "- **PyBullet connection error**: Make sure `pybullet` is installed. On Colab, the bootstrap cell handles this.\n", + "- **If a section fails, do not continue downstream.** Fix locally first, then rerun." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/utils/__init__.py b/workshops/dcc26/utils/__init__.py new file mode 100644 index 0000000..e9c9c07 --- /dev/null +++ b/workshops/dcc26/utils/__init__.py @@ -0,0 +1,3 @@ +"""Utilities for DCC26 workshop notebooks.""" + +from .notebook_helpers import * # noqa: F401,F403 diff --git a/workshops/dcc26/utils/notebook_helpers.py b/workshops/dcc26/utils/notebook_helpers.py new file mode 100644 index 0000000..73cd5be --- /dev/null +++ b/workshops/dcc26/utils/notebook_helpers.py @@ -0,0 +1,1054 @@ +"""Helper utilities for DCC26 workshop notebooks. + +All visualization and boilerplate code lives here so that notebook cells +stay ultra-short (2-3 lines each). 
Import with:: + + import sys, os + sys.path.insert(0, os.path.join(os.path.dirname("__file__"), "..", "utils")) + from notebook_helpers import * +""" + +from __future__ import annotations + +import json +import os +import random +from typing import Any + +import matplotlib.pyplot as plt +import numpy as np +import torch as th + +# --------------------------------------------------------------------------- +# Reproducibility +# --------------------------------------------------------------------------- + + +def set_global_seed(seed: int) -> None: + """Set seeds for reproducibility across numpy, python, and torch.""" + random.seed(seed) + np.random.seed(seed) + th.manual_seed(seed) + if th.cuda.is_available(): + th.cuda.manual_seed_all(seed) + th.backends.cudnn.deterministic = True + th.backends.cudnn.benchmark = False + + +def pick_device() -> th.device: + """Pick the best available torch device.""" + if th.backends.mps.is_available(): + return th.device("mps") + if th.cuda.is_available(): + return th.device("cuda") + return th.device("cpu") + + +# --------------------------------------------------------------------------- +# File I/O +# --------------------------------------------------------------------------- + + +def ensure_dir(path: str) -> str: + os.makedirs(path, exist_ok=True) + return path + + +def save_json(data: Any, path: str) -> None: + with open(path, "w", encoding="utf-8") as f: + json.dump(data, f, indent=2) + + +def load_json(path: str) -> Any: + with open(path, encoding="utf-8") as f: + return json.load(f) + + +# --------------------------------------------------------------------------- +# Condition introspection +# --------------------------------------------------------------------------- + + +def _is_array_condition(dataset_train, key: str, n_check: int = 3) -> bool: + """Heuristic: is this condition key an array (image) rather than a scalar?""" + for i in range(min(n_check, len(dataset_train[key]))): + val = dataset_train[key][i] + arr = 
np.asarray(val) + if arr.ndim >= 1 and arr.size > 1: + return True + return False + + +def _split_condition_keys(dataset_train, problem) -> tuple[list[str], list[str]]: + """Split conditions into (scalar_keys, array_keys).""" + scalar, array = [], [] + for k in problem.conditions_keys: + if _is_array_condition(dataset_train, k): + array.append(k) + else: + scalar.append(k) + return scalar, array + + +# --------------------------------------------------------------------------- +# NB00 visualizations — uses problem.render() for everything +# --------------------------------------------------------------------------- + + +def show_design_gallery( + dataset, + problem, + n: int = 8, + seed: int = 7, +) -> None: + """Show a grid of random training designs. + + Renders designs directly with imshow in a single figure to avoid + duplicate-display issues with Jupyter's inline backend. + """ + rng = np.random.default_rng(seed) + train = dataset["train"] + n_total = len(train["optimal_design"]) + n = min(n, n_total) + ids = rng.choice(n_total, size=n, replace=False) + scalar_keys, _ = _split_condition_keys(train, problem) + + ncols = min(4, n) + nrows = (n + ncols - 1) // ncols + design_shape = problem.design_space.shape + aspect = design_shape[1] / design_shape[0] if len(design_shape) >= 2 else 1.0 + fig, axes = plt.subplots(nrows, ncols, figsize=(3.5 * ncols * aspect, 3.5 * nrows)) + axes = np.atleast_2d(axes) + + for i, idx in enumerate(ids): + ax = axes[i // ncols, i % ncols] + d = np.array(train["optimal_design"][int(idx)]) + ax.imshow(d, cmap="gray_r", vmin=0, vmax=1) + ax.axis("off") + if scalar_keys: + cond_str = "\n".join( + f"{k}={float(train[k][int(idx)]):.2f}" for k in scalar_keys + ) + ax.set_title(cond_str, fontsize=8) + + for i in range(n, nrows * ncols): + axes[i // ncols, i % ncols].axis("off") + + fig.suptitle(f"Random training designs ({n} samples)", fontsize=13, y=1.01) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def 
show_condition_distributions(dataset, problem) -> None: + """Histogram of each scalar condition key across the training set.""" + train = dataset["train"] + scalar_keys, _ = _split_condition_keys(train, problem) + + if not scalar_keys: + print("This problem has no scalar conditions to plot.") + return + + fig, axes = plt.subplots(1, len(scalar_keys), figsize=(5 * len(scalar_keys), 3.5)) + if len(scalar_keys) == 1: + axes = [axes] + + for ax, key in zip(axes, scalar_keys): + values = np.array([float(v) for v in train[key]]) + ax.hist(values, bins=30, edgecolor="white", color="steelblue") + ax.set_xlabel(key, fontsize=11) + ax.set_ylabel("count") + ax.set_title(f"Distribution of '{key}'") + + plt.tight_layout() + plt.show() + plt.close(fig) + + +def show_valid_vs_violated(design, config, violations, valid_violations, problem=None, cmap="gray_r") -> None: + """Side-by-side rendering: valid config vs violated config. + + Uses ``problem.render()`` if provided, otherwise falls back to imshow. 
+ """ + if problem is not None: + # Render via problem.render() for universal support + result1 = problem.render(design) + result2 = problem.render(design) + + fig1 = result1[0] if isinstance(result1, tuple) else result1 + fig2 = result2[0] if isinstance(result2, tuple) else result2 + + if hasattr(fig1, "savefig"): + n_valid = len(valid_violations) + fig1.suptitle( + f"Valid config — {n_valid} violation(s)", + color="green" if n_valid == 0 else "red", fontsize=12, + ) + plt.show() + plt.close(fig1) + + if hasattr(fig2, "savefig"): + n_bad = len(violations) + fig2.suptitle( + f"Bad config — {n_bad} violation(s)", + color="red" if n_bad > 0 else "green", fontsize=12, + ) + plt.show() + plt.close(fig2) + else: + # Fallback: simple imshow for 2D + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4)) + d = np.array(design) + ax1.imshow(d, cmap=cmap, vmin=0, vmax=1) + ax1.set_title(f"Valid — {len(valid_violations)} violation(s)", + color="green" if len(valid_violations) == 0 else "red") + ax1.axis("off") + ax2.imshow(d, cmap=cmap, vmin=0, vmax=1) + ax2.set_title(f"Bad — {len(violations)} violation(s)", + color="red" if len(violations) > 0 else "green") + ax2.axis("off") + fig.suptitle("Same design, different conditions", fontsize=13) + plt.tight_layout() + plt.show() + plt.close(fig) + + +def interactive_condition_explorer(dataset, problem): + """Interactive slider widget to explore designs by scalar condition values. + + Falls back to a static gallery if ipywidgets is unavailable or if + the problem has no scalar conditions. 
+ """ + train = dataset["train"] + scalar_keys, _ = _split_condition_keys(train, problem) + all_designs = np.array(train["optimal_design"]) + n_total = len(all_designs) + + try: + import ipywidgets as widgets + from IPython.display import display as ipy_display + except ImportError: + print("ipywidgets not available — showing static gallery instead.") + show_design_gallery(dataset, problem) + return + + # Build scalar condition arrays + scalar_conds = {k: np.array([float(v) for v in train[k]]) for k in scalar_keys} + + # Build sliders for scalar conditions + sliders = {} + for key in scalar_keys: + vals = scalar_conds[key] + lo, hi = float(np.min(vals)), float(np.max(vals)) + step = max((hi - lo) / 100.0, 1e-6) + sliders[key] = widgets.FloatRangeSlider( + value=[lo, hi], + min=lo, + max=hi, + step=step, + description=key, + continuous_update=False, + layout=widgets.Layout(width="500px"), + style={"description_width": "130px"}, + ) + + output = widgets.Output() + + design_shape = problem.design_space.shape + aspect = design_shape[1] / design_shape[0] if len(design_shape) >= 2 else 1.0 + + def _update_gallery(**kwargs): + mask = np.ones(n_total, dtype=bool) + for key in scalar_keys: + lo, hi = kwargs[key] + mask &= (scalar_conds[key] >= lo) & (scalar_conds[key] <= hi) + + matching_ids = np.where(mask)[0] + + with output: + output.clear_output(wait=True) + if len(matching_ids) == 0: + print("No designs match these conditions. 
Widen the sliders.") + return + + n_show = min(8, len(matching_ids)) + rng = np.random.default_rng(42) + show_ids = rng.choice(matching_ids, size=n_show, replace=False) + + ncols = min(4, n_show) + nrows = (n_show + ncols - 1) // ncols + fig, axes = plt.subplots(nrows, ncols, figsize=(3.5 * ncols * aspect, 3.5 * nrows)) + axes = np.atleast_2d(axes) + + for i, idx in enumerate(show_ids): + ax = axes[i // ncols, i % ncols] + ax.imshow(all_designs[idx], cmap="gray_r", vmin=0, vmax=1) + ax.axis("off") + cond_str = "\n".join( + f"{k}={scalar_conds[k][idx]:.2f}" for k in scalar_keys + ) + ax.set_title(cond_str, fontsize=8) + + for i in range(n_show, nrows * ncols): + axes[i // ncols, i % ncols].axis("off") + + fig.suptitle(f"Matching designs: {len(matching_ids)}/{n_total}", fontsize=12) + fig.tight_layout() + plt.show() + plt.close(fig) + + if scalar_keys: + title = widgets.HTML("

Explore the dataset — drag sliders to filter by condition

") + slider_box = widgets.VBox(list(sliders.values())) + + def _on_slider_change(change): + _update_gallery(**{k: s.value for k, s in sliders.items()}) + + for s in sliders.values(): + s.observe(_on_slider_change, names="value") + + ipy_display(title, slider_box, output) + _update_gallery(**{k: s.value for k, s in sliders.items()}) + else: + # No scalar conditions (e.g., PowerElectronics) + show_design_gallery(dataset, problem) + + +# --------------------------------------------------------------------------- +# NB01 training helpers +# --------------------------------------------------------------------------- + + +class WorkshopGenerator(th.nn.Module): + """Thin wrapper around the EngiOpt CNN generator for the workshop. + + The CNN generator expects 4-D inputs ``(B, C, 1, 1)`` and returns + ``(B, 1, H, W)``. This wrapper lets callers pass flat 2-D tensors + ``(B, C)`` and returns ``(B, H, W)`` — matching the supervised-training + helpers that operate on numpy arrays of shape ``(N, H, W)``. + """ + + def __init__(self, cnn_generator: th.nn.Module): + super().__init__() + self.gen = cnn_generator + + def forward(self, z: th.Tensor, conds: th.Tensor) -> th.Tensor: + z_4d = z.unsqueeze(-1).unsqueeze(-1) # (B, z_dim) -> (B, z_dim, 1, 1) + c_4d = conds.unsqueeze(-1).unsqueeze(-1) # (B, n_c) -> (B, n_c, 1, 1) + out = self.gen(z_4d, c_4d) # (B, 1, H, W) + return out.squeeze(1) # (B, H, W) + + +def train_supervised_generator( + model, + train_conditions: np.ndarray, + train_targets: np.ndarray, + *, + latent_dim: int, + epochs: int = 8, + batch_size: int = 64, + lr: float = 2e-4, + device=None, + snapshot_conditions: np.ndarray | None = None, + snapshot_at_epochs: list[int] | None = None, + verbose: bool = True, +) -> dict: + """Train a conditional generator with supervised MSE loss. + + The generator learns to map (noise, conditions) to designs by minimising + the MSE between its outputs and real optimal designs from the dataset. + + Args: + model: Generator network. 
Forward signature: model(noise, conditions). + train_conditions: (N, n_conds) float32 array. + train_targets: (N, *design_shape) float32 array, scaled to [-1, 1]. + latent_dim: Dimensionality of the noise vector. + epochs: Number of training epochs. + batch_size: Mini-batch size. + lr: Adam learning rate. + device: Torch device. + snapshot_conditions: If provided, generate designs from these conditions + at epochs listed in *snapshot_at_epochs*. + snapshot_at_epochs: Epoch numbers (1-indexed) at which to capture snapshots. + verbose: Print per-epoch progress. + + Returns: + Dict with keys ``losses`` (list[float]) and ``snapshots`` + (list of (epoch, np.ndarray) pairs). + """ + from torch.utils.data import DataLoader, TensorDataset + + if device is None: + device = th.device("cpu") + device = th.device(device) if isinstance(device, str) else device + model = model.to(device) + model.train() + + optimizer = th.optim.Adam(model.parameters(), lr=lr) + criterion = th.nn.MSELoss() + + conds_t = th.tensor(train_conditions, dtype=th.float32, device=device) + targets_t = th.tensor(train_targets, dtype=th.float32, device=device) + dl = DataLoader(TensorDataset(conds_t, targets_t), batch_size=batch_size, shuffle=True) + + if snapshot_at_epochs is None: + snapshot_at_epochs = [] + snap_conds_t = None + if snapshot_conditions is not None: + snap_conds_t = th.tensor(snapshot_conditions, dtype=th.float32, device=device) + + losses: list[float] = [] + snapshots: list[tuple[int, np.ndarray]] = [] + + for epoch in range(1, epochs + 1): + model.train() + epoch_loss = 0.0 + for batch_conds, batch_targets in dl: + z = th.randn(batch_conds.shape[0], latent_dim, device=device) + fake = model(z, batch_conds) + loss = criterion(fake.flatten(1), batch_targets.flatten(1)) + optimizer.zero_grad() + loss.backward() + optimizer.step() + epoch_loss += loss.item() + avg = epoch_loss / len(dl) + losses.append(avg) + if verbose: + print(f" Epoch {epoch:3d}/{epochs} | Loss: {avg:.6f}") + + if epoch 
in snapshot_at_epochs and snap_conds_t is not None: + model.eval() + with th.no_grad(): + z = th.randn(len(snap_conds_t), latent_dim, device=device) + snap = model(z, snap_conds_t) + snap_np = ((snap.cpu().numpy() + 1.0) / 2.0).clip(0, 1) + snapshots.append((epoch, snap_np)) + + return {"losses": losses, "snapshots": snapshots} + + +def generate_designs( + model, + conditions: np.ndarray, + *, + latent_dim: int, + device=None, +) -> np.ndarray: + """Generate designs from conditions using a trained generator. + + Args: + model: Trained generator model. + conditions: (N, n_conds) float32 array. + latent_dim: Dimensionality of the noise vector. + device: Torch device. + + Returns: + (N, *design_shape) float32 array in [0, 1]. + """ + if device is None: + device = th.device("cpu") + device = th.device(device) if isinstance(device, str) else device + model.eval() + conds_t = th.tensor(conditions, dtype=th.float32, device=device) + with th.no_grad(): + z = th.randn(len(conditions), latent_dim, device=device) + raw = model(z, conds_t) + return ((raw.cpu().numpy() + 1.0) / 2.0).clip(0, 1) + + +def show_training_progression( + snapshots: list[tuple[int, np.ndarray]], + baseline_designs: np.ndarray | None = None, + n_show: int = 4, +) -> None: + """Visualize how generated designs evolve across training epochs. + + Args: + snapshots: List of (epoch, designs_array) tuples from training. + baseline_designs: If provided, show ground-truth in the last row. + n_show: Number of designs to display per row. 
+ """ + if not snapshots: + print("No snapshots to display.") + return + + n_show = min(n_show, snapshots[0][1].shape[0]) + n_rows = len(snapshots) + (1 if baseline_designs is not None else 0) + + fig, axes = plt.subplots(n_rows, n_show, figsize=(2.8 * n_show, 2.5 * n_rows)) + if n_rows == 1: + axes = axes[np.newaxis, :] + if n_show == 1: + axes = axes[:, np.newaxis] + + for row, (epoch, designs) in enumerate(snapshots): + for col in range(n_show): + axes[row, col].imshow(designs[col], cmap="gray_r", vmin=0, vmax=1) + axes[row, col].axis("off") + axes[row, 0].set_ylabel( + f"Epoch {epoch}", fontsize=11, rotation=0, labelpad=55, va="center", + ) + + if baseline_designs is not None: + row = len(snapshots) + for col in range(n_show): + axes[row, col].imshow(baseline_designs[col], cmap="gray_r", vmin=0, vmax=1) + axes[row, col].axis("off") + axes[row, 0].set_ylabel( + "Ground\ntruth", fontsize=11, rotation=0, labelpad=55, va="center", + ) + + fig.suptitle("How the generator learns over training", fontsize=14, y=1.02) + fig.tight_layout() + plt.show() + plt.close(fig) + + +# --------------------------------------------------------------------------- +# NB01 visualizations +# --------------------------------------------------------------------------- + + +def show_training_curve(train_losses: list[float], save_path: str | None = None) -> None: + """Plot training loss over epochs.""" + fig, ax = plt.subplots(figsize=(7, 3.5)) + ax.plot(range(1, len(train_losses) + 1), train_losses, marker="o", linewidth=2, color="#2563eb") + ax.set_xlabel("Epoch", fontsize=12) + ax.set_ylabel("MSE Loss", fontsize=12) + ax.set_title("Generator Training Loss", fontsize=14) + ax.grid(True, alpha=0.3) + fig.tight_layout() + if save_path: + fig.savefig(save_path, dpi=120) + plt.show() + print(f"Final loss: {train_losses[-1]:.6f}") + + +def show_gen_vs_baseline( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + conditions_records: list[dict], + condition_keys: list[str], + n_show: 
int = 8, + problem=None, +) -> None: + """Show generated vs baseline designs using ``problem.render()`` if available.""" + n_show = min(n_show, len(gen_designs)) + + if problem is not None: + for i in range(n_show): + # Scalar conditions only for title + scalars = {k: v for k, v in conditions_records[i].items() + if not isinstance(v, (list, np.ndarray)) or np.asarray(v).size == 1} + cond_str = " | ".join(f"{k}: {float(v):.3f}" for k, v in scalars.items()) + + result_g = problem.render(gen_designs[i]) + fig_g = result_g[0] if isinstance(result_g, tuple) else result_g + if hasattr(fig_g, "savefig"): + fig_g.suptitle(f"Generated {i} — {cond_str}", fontsize=10, y=1.02) + plt.show() + plt.close(fig_g) + + result_b = problem.render(baseline_designs[i]) + fig_b = result_b[0] if isinstance(result_b, tuple) else result_b + if hasattr(fig_b, "savefig"): + fig_b.suptitle(f"Baseline {i}", fontsize=10, y=1.02) + plt.show() + plt.close(fig_b) + else: + # Fallback: side-by-side imshow grid + fig, axes = plt.subplots(2, n_show, figsize=(2.2 * n_show, 5.5)) + if n_show == 1: + axes = axes.reshape(2, 1) + for i in range(n_show): + axes[0, i].imshow(gen_designs[i], cmap="gray", vmin=0, vmax=1) + axes[0, i].axis("off") + cond_str = "\n".join(f"{k}: {conditions_records[i][k]:.3f}" for k in condition_keys) + axes[0, i].set_title(cond_str, fontsize=8) + axes[1, i].imshow(baseline_designs[i], cmap="gray", vmin=0, vmax=1) + axes[1, i].axis("off") + axes[0, 0].set_ylabel("Generated", fontsize=12, rotation=90, labelpad=10) + axes[1, 0].set_ylabel("Baseline", fontsize=12, rotation=90, labelpad=10) + fig.suptitle("Generated (top) vs Baseline (bottom)", fontsize=14, y=1.01) + fig.tight_layout() + plt.show() + plt.close(fig) + + +# --------------------------------------------------------------------------- +# NB02 visualizations +# --------------------------------------------------------------------------- + + +def show_objective_comparison(results) -> None: + """Histogram + scatter of generated 
vs baseline objectives.""" + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) + + ax1.hist(results["gen_obj"], bins=10, alpha=0.7, label="Generated", color="#4C72B0") + ax1.hist(results["base_obj"], bins=10, alpha=0.7, label="Baseline", color="#DD8452") + ax1.set_xlabel("Objective (lower is better)") + ax1.set_ylabel("Count") + ax1.set_title("Objective distribution") + ax1.legend() + + colors = results["gen_feasible"].map({True: "#55A868", False: "#C44E52"}) + ax2.scatter(results["base_obj"], results["gen_obj"], alpha=0.8, c=colors, + edgecolors="black", linewidths=0.5, s=60) + lo = min(results["base_obj"].min(), results["gen_obj"].min()) * 0.9 + hi = max(results["base_obj"].max(), results["gen_obj"].max()) * 1.1 + ax2.plot([lo, hi], [lo, hi], "--", color="gray", linewidth=1, label="y = x") + ax2.set_xlabel("Baseline objective") + ax2.set_ylabel("Generated objective") + ax2.set_title("Per-sample (green=feasible, red=infeasible)") + ax2.legend() + + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_feasibility_bars(results) -> None: + """Bar chart comparing feasibility rates.""" + gen_rate = results["gen_feasible"].mean() + base_rate = results["base_feasible"].mean() + + fig, ax = plt.subplots(figsize=(5, 4)) + bars = ax.bar(["Generated", "Baseline"], [gen_rate, base_rate], + color=["#4C72B0", "#DD8452"], edgecolor="black") + ax.set_ylim(0, 1.15) + ax.set_ylabel("Feasible fraction") + ax.set_title("Feasibility rate") + for bar, val in zip(bars, [gen_rate, base_rate]): + ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.03, + f"{val:.0%}", ha="center", fontweight="bold") + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_design_comparison_grid(gen_designs, baseline_designs, results, + n_show: int = 6, problem=None) -> None: + """Show generated vs baseline with feasibility annotations. + + Uses ``problem.render()`` if available, otherwise falls back to imshow. 
+ """ + n_show = min(n_show, len(gen_designs)) + + if problem is not None: + for i in range(n_show): + feas = "FEASIBLE" if results.iloc[i]["gen_feasible"] else "INFEASIBLE" + gap = results.iloc[i]["gen_minus_base"] + + result_g = problem.render(gen_designs[i]) + fig_g = result_g[0] if isinstance(result_g, tuple) else result_g + if hasattr(fig_g, "savefig"): + color = "green" if results.iloc[i]["gen_feasible"] else "red" + fig_g.suptitle(f"Generated {i} — {feas} (gap={gap:.1f})", + fontsize=11, color=color, y=1.02) + plt.show() + plt.close(fig_g) + else: + # Fallback: imshow grid + fig, axes = plt.subplots(2, n_show, figsize=(2.5 * n_show, 5)) + if n_show == 1: + axes = axes[:, None] + for i in range(n_show): + axes[0, i].imshow(gen_designs[i], cmap="gray", vmin=0, vmax=1, aspect="auto") + feas = "FEASIBLE" if results.iloc[i]["gen_feasible"] else "INFEASIBLE" + color = "green" if results.iloc[i]["gen_feasible"] else "red" + axes[0, i].set_title(f"Gen {i}\n{feas}", fontsize=9, color=color, fontweight="bold") + axes[0, i].axis("off") + axes[1, i].imshow(baseline_designs[i], cmap="gray", vmin=0, vmax=1, aspect="auto") + axes[1, i].set_title(f"Baseline {i}", fontsize=9) + axes[1, i].axis("off") + fig.suptitle("Generated vs Baseline", fontsize=13, y=1.02) + fig.tight_layout() + plt.show() + plt.close(fig) + + +# --------------------------------------------------------------------------- +# Metric helpers (NB02) +# --------------------------------------------------------------------------- + + +def mean_pairwise_l2(designs: np.ndarray) -> float: + """Average L2 distance between all pairs. 
Measures intra-set diversity.""" + flat = designs.reshape(designs.shape[0], -1) + n = flat.shape[0] + if n < 2: + return 0.0 + dists = [] + for i in range(n): + for j in range(i + 1, n): + dists.append(float(np.linalg.norm(flat[i] - flat[j]))) + return float(np.mean(dists)) + + +def mean_nn_distance_to_reference(designs: np.ndarray, reference: np.ndarray) -> float: + """Average nearest-neighbor distance to a reference set. Measures novelty.""" + q = designs.reshape(designs.shape[0], -1) + r = reference.reshape(reference.shape[0], -1) + nn_dists = [] + for i in range(q.shape[0]): + d = np.linalg.norm(r - q[i][None, :], axis=1) + nn_dists.append(float(np.min(d))) + return float(np.mean(nn_dists)) + + +# --------------------------------------------------------------------------- +# NB02 enhanced visualizations — pedagogical metric exploration +# --------------------------------------------------------------------------- + + +def show_residual_heatmaps( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + n_show: int = 6, +) -> None: + """Pixel-wise absolute difference between generated and baseline designs. + + Three rows: generated, baseline, |residual|. The residual row uses a + ``Reds`` colourmap (white = no error, dark red = large error). 
+ """ + n_show = min(n_show, len(gen_designs)) + fig, axes = plt.subplots(3, n_show, figsize=(2.8 * n_show, 7)) + if n_show == 1: + axes = axes[:, None] + + for i in range(n_show): + axes[0, i].imshow(gen_designs[i], cmap="gray_r", vmin=0, vmax=1) + axes[0, i].set_title(f"Gen {i}", fontsize=9) + axes[0, i].axis("off") + + axes[1, i].imshow(baseline_designs[i], cmap="gray_r", vmin=0, vmax=1) + axes[1, i].set_title(f"Baseline {i}", fontsize=9) + axes[1, i].axis("off") + + diff = np.abs(gen_designs[i].astype(float) - baseline_designs[i].astype(float)) + axes[2, i].imshow(diff, cmap="Reds", vmin=0, vmax=1) + axes[2, i].set_title(f"|diff| mean={diff.mean():.3f}", fontsize=8) + axes[2, i].axis("off") + + axes[0, 0].set_ylabel("Generated", fontsize=11, rotation=90, labelpad=10) + axes[1, 0].set_ylabel("Baseline", fontsize=11, rotation=90, labelpad=10) + axes[2, 0].set_ylabel("|Residual|", fontsize=11, rotation=90, labelpad=10) + fig.suptitle( + "Pixel-level residuals: where do generated designs differ from baselines?", + fontsize=13, y=1.01, + ) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_objective_residuals(results) -> None: + """Per-sample bar chart of objective gap (generated - baseline). + + Bars above zero mean the generated design is *worse* (higher compliance). 
+ """ + gaps = results["gen_minus_base"] + colors = ["#C44E52" if g > 0 else "#55A868" for g in gaps] + + fig, ax = plt.subplots(figsize=(max(8, len(gaps) * 0.45), 4)) + ax.bar(range(len(gaps)), gaps, color=colors, edgecolor="black", linewidth=0.5) + ax.axhline(0, color="gray", linewidth=1, linestyle="--") + ax.set_xlabel("Sample index") + ax.set_ylabel("Objective gap (gen - baseline)") + ax.set_title( + "Per-sample objective residuals (green = generated is better, red = worse)", + fontsize=11, + ) + ax.set_xticks(range(len(gaps))) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_volfrac_analysis(results, volfrac_tol: float = 0.05) -> None: + """Volume-fraction target vs actual scatter + error distribution.""" + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) + + colors = results["gen_feasible"].map({True: "#55A868", False: "#C44E52"}) + ax1.scatter( + results["target_volfrac"], results["gen_volfrac"], + c=colors, edgecolors="black", linewidths=0.5, s=60, alpha=0.8, + ) + lo = min(results["target_volfrac"].min(), results["gen_volfrac"].min()) - 0.05 + hi = max(results["target_volfrac"].max(), results["gen_volfrac"].max()) + 0.05 + xs = np.linspace(lo, hi, 100) + ax1.plot(xs, xs, "--", color="gray", linewidth=1, label="Perfect match") + ax1.fill_between( + xs, xs - volfrac_tol, xs + volfrac_tol, + alpha=0.12, color="green", label=f"Tolerance (\u00b1{volfrac_tol})", + ) + ax1.set_xlabel("Target volume fraction") + ax1.set_ylabel("Generated volume fraction") + ax1.set_title("Constraint satisfaction: target vs actual volfrac") + ax1.legend(fontsize=9) + + errors = results["gen_volfrac"] - results["target_volfrac"] + ax2.hist(errors, bins=15, edgecolor="white", color="#4C72B0", alpha=0.8) + ax2.axvline(0, color="gray", linestyle="--", linewidth=1) + ax2.axvline(-volfrac_tol, color="red", linestyle=":", linewidth=1.5, label=f"\u00b1{volfrac_tol}") + ax2.axvline(volfrac_tol, color="red", linestyle=":", linewidth=1.5) + ax2.set_xlabel("Volume fraction 
error (actual \u2212 target)") + ax2.set_ylabel("Count") + ax2.set_title("Distribution of constraint errors") + ax2.legend(fontsize=9) + + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_spatial_distribution_comparison( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + train_reference: np.ndarray | None = None, +) -> None: + """Compare *where* material is placed on average across design sets. + + For binary/near-binary topology designs, pixel-intensity histograms are + uninformative (just two spikes at 0 and 1). Instead we show: + + - **Mean design images**: the average design across each set, revealing + where material tends to be placed. Differences highlight spatial + biases in the generator. + - **Per-design volume fraction distributions**: how much total material + each design uses, compared across sets. + """ + has_train = train_reference is not None + n_img = 3 if has_train else 2 + + fig, axes = plt.subplots(1, n_img + 1, figsize=(4.2 * (n_img + 1), 4), + gridspec_kw={"width_ratios": [1] * n_img + [1.3]}, + constrained_layout=True) + + # ── Mean design images ─────────────────────────────────────────── + sets = [("Generated", gen_designs, "#4C72B0"), + ("Baseline", baseline_designs, "#DD8452")] + if has_train: + sets.append(("Training", train_reference, "#55A868")) + + vmin, vmax = 0, 1 + for ax, (label, designs, _color) in zip(axes[:n_img], sets): + mean_img = designs.mean(axis=0) + im = ax.imshow(mean_img, cmap="gray_r", vmin=vmin, vmax=vmax) + ax.set_title(f"Mean {label}\n(n={len(designs)})", fontsize=11) + ax.axis("off") + fig.colorbar(im, ax=axes[:n_img].tolist(), shrink=0.75, + label="Avg. 
material density", pad=0.04) + + # ── Per-design volume fraction distributions ───────────────────── + ax_vf = axes[n_img] + for label, designs, color in sets: + vfracs = designs.reshape(designs.shape[0], -1).mean(axis=1) + ax_vf.hist(vfracs, bins=25, alpha=0.5, density=True, label=label, + color=color, edgecolor="white", linewidth=0.3) + ax_vf.set_xlabel("Volume fraction (per design)") + ax_vf.set_ylabel("Density") + ax_vf.set_title("Material-usage\ndistributions", fontsize=11) + ax_vf.legend(fontsize=9) + + fig.suptitle( + "Spatial distribution comparison \u2014 where does each set place material?", + fontsize=13, y=1.03, + ) + plt.show() + plt.close(fig) + + +def show_mmd_comparison_bar( + mmd_gen_base: float, + mmd_train_base: float, + mmd_random_base: float, +) -> None: + """Bar chart placing the generator's MMD in context. + + Shows three reference points so the raw MMD number becomes interpretable: + - Generated vs baseline (our metric -- same conditions) + - Train sample vs baseline (retrieval baseline -- no conditioning) + - Random vs baseline (upper bound / worst case) + """ + labels = [ + "Generated\nvs Baseline", + "Train sample\nvs Baseline\n(no conditioning)", + "Random\nvs Baseline\n(worst case)", + ] + values = [mmd_gen_base, mmd_train_base, mmd_random_base] + colors = ["#4C72B0", "#DD8452", "#C44E52"] + + fig, ax = plt.subplots(figsize=(7, 4.5)) + bars = ax.bar(labels, values, color=colors, edgecolor="black", linewidth=0.5, width=0.55) + for bar, v in zip(bars, values): + ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + max(abs(v) for v in values) * 0.02, + f"{v:.4f}", ha="center", fontsize=11, fontweight="bold") + ax.set_ylabel("MMD (lower = more similar)") + ax.set_title("MMD in context \u2014 where does the generator sit?", fontsize=13) + ax.set_ylim(0, max(values) * 1.25) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_pairwise_distance_heatmap( + designs: np.ndarray, + title: str = "Pairwise L2 distance among 
generated designs", +) -> None: + """Heatmap of pairwise L2 distances — visual proxy for diversity. + + A uniform warm colour off-diagonal means all designs differ roughly + equally (good diversity). Cool/dark blocks reveal clusters of + near-identical designs (partial mode collapse). + """ + from scipy.spatial.distance import cdist + + flat = designs.reshape(designs.shape[0], -1) + dists = cdist(flat, flat, "euclidean") + + fig, (ax, ax_hist) = plt.subplots( + 1, 2, figsize=(11, 5), + gridspec_kw={"width_ratios": [1.2, 1]}, + ) + + im = ax.imshow(dists, cmap="viridis") + ax.set_xlabel("Design index") + ax.set_ylabel("Design index") + ax.set_title(title, fontsize=11) + fig.colorbar(im, ax=ax, label="L2 distance", fraction=0.046, pad=0.04) + + # Histogram of off-diagonal distances + triu_idx = np.triu_indices(len(designs), k=1) + off_diag = dists[triu_idx] + ax_hist.hist(off_diag, bins=25, edgecolor="white", color="#4C72B0", alpha=0.8) + ax_hist.axvline(off_diag.mean(), color="#C44E52", linewidth=2, + linestyle="--", label=f"Mean = {off_diag.mean():.1f}") + ax_hist.set_xlabel("Pairwise L2 distance") + ax_hist.set_ylabel("Count") + ax_hist.set_title("Distribution of pairwise distances", fontsize=11) + ax_hist.legend(fontsize=9) + + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_embedding_scatter( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + train_reference: np.ndarray, +) -> None: + """PCA 2-D projection of generated, baseline, and training designs. + + Uses numpy SVD so there is no sklearn dependency. 
+ """ + g = gen_designs.reshape(gen_designs.shape[0], -1).astype(np.float64) + b = baseline_designs.reshape(baseline_designs.shape[0], -1).astype(np.float64) + t = train_reference.reshape(train_reference.shape[0], -1).astype(np.float64) + + combined = np.vstack([g, b, t]) + mean = combined.mean(axis=0) + centered = combined - mean + _, _, Vt = np.linalg.svd(centered, full_matrices=False) + proj = centered @ Vt[:2].T + + ng, nb = len(g), len(b) + pg, pb, pt = proj[:ng], proj[ng:ng + nb], proj[ng + nb:] + + fig, ax = plt.subplots(figsize=(8, 7)) + ax.scatter(pt[:, 0], pt[:, 1], alpha=0.15, s=15, c="#AAAAAA", label=f"Training ({len(t)})") + ax.scatter(pb[:, 0], pb[:, 1], alpha=0.7, s=50, c="#DD8452", edgecolors="black", + linewidths=0.5, label=f"Baseline ({nb})", marker="s") + ax.scatter(pg[:, 0], pg[:, 1], alpha=0.8, s=60, c="#4C72B0", edgecolors="black", + linewidths=0.5, label=f"Generated ({ng})", marker="o") + ax.set_xlabel("PC 1") + ax.set_ylabel("PC 2") + ax.set_title("PCA projection \u2014 where do generated designs live in design space?", fontsize=12) + ax.legend(fontsize=10) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_optimization_trajectories(opt_data: list[dict]) -> None: + """Plot optimization trajectories showing how generated designs warmstart optimization. + + Each entry in *opt_data* should be a dict with keys: + ``sample_idx``, ``obj_trajectory`` (list of floats), ``base_obj`` (float). 
+ """ + n = len(opt_data) + fig, axes = plt.subplots(1, n, figsize=(5.5 * n, 4.5), squeeze=False) + + for i, d in enumerate(opt_data): + ax = axes[0, i] + objs = d["obj_trajectory"] + steps = list(range(len(objs))) + base = d["base_obj"] + + ax.plot(steps, objs, "o-", color="#4C72B0", linewidth=2, markersize=4, label="Optimizer") + ax.axhline(base, color="#DD8452", linestyle="--", linewidth=1.5, + label=f"Baseline = {base:.1f}") + ax.fill_between(steps, objs, base, alpha=0.12, color="#4C72B0") + + iog = objs[0] - base + fog = objs[-1] - base + cog = sum(o - base for o in objs) + + ax.set_title( + f"Sample {d['sample_idx']}\n" + f"IOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f}", + fontsize=10, + ) + ax.set_xlabel("Optimization step") + ax.set_ylabel("Objective (compliance)") + ax.legend(fontsize=8, loc="upper right") + + fig.suptitle( + "Optimization from generated warmstarts \u2014 does the model give the optimizer a head start?", + fontsize=13, y=1.05, + ) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_metric_summary_dashboard(summary_dict: dict) -> None: + """Multi-panel grouped bar chart summarizing all metric categories.""" + categories = { + "Simulation\nPerformance": [ + ("Obj gap (gen\u2212base)", summary_dict.get("objective_gap_mean", 0)), + ("Improvement rate", summary_dict.get("improvement_rate", 0)), + ], + "Constraint\nSatisfaction": [ + ("Gen feasible %", summary_dict.get("gen_feasible_rate", 0)), + ("Base feasible %", summary_dict.get("base_feasible_rate", 0)), + ], + "Distributional\nSimilarity": [ + ("MMD", summary_dict.get("mmd", 0)), + ], + "Diversity &\nNovelty": [ + ("Diversity (L2)", summary_dict.get("gen_diversity_l2", 0)), + ("Novelty (NN)", summary_dict.get("gen_novelty_to_train_l2", 0)), + ], + } + + fig, axes = plt.subplots(1, len(categories), figsize=(4 * len(categories), 4.5)) + palette = ["#4C72B0", "#DD8452", "#55A868", "#C44E52"] + + for ax, (cat_name, metrics), color in zip(axes, categories.items(), palette): + names = 
[m[0] for m in metrics] + vals = [m[1] for m in metrics] + bars = ax.barh(names, vals, color=color, edgecolor="black", linewidth=0.5) + for bar, v in zip(bars, vals): + ax.text(bar.get_width() + max(abs(v) for v in vals) * 0.03, + bar.get_y() + bar.get_height() / 2, + f"{v:.4f}" if abs(v) < 1 else f"{v:.1f}", + va="center", fontsize=9) + ax.set_title(cat_name, fontsize=11, fontweight="bold") + ax.set_xlim(left=min(0, min(vals) * 1.2)) + + fig.suptitle("Evaluation dashboard \u2014 how does the generator perform?", fontsize=14, y=1.03) + fig.tight_layout() + plt.show() + plt.close(fig)