diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..1e88f1ba8 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,19 @@ +# Force LF line endings for WSL and cross-platform (Linux/Mac/GitHub Actions) compatibility +# This prevents obscure bugs when running Windows-edited scripts on Linux runners. +* text=auto eol=lf + +*.py text eol=lf +*.rst text eol=lf +*.md text eol=lf +*.yml text eol=lf +*.yaml text eol=lf +*.cfg text eol=lf +*.toml text eol=lf +*.ipynb text eol=lf + +*.png binary +*.jpg binary +*.jpeg binary +*.gif binary +*.ico binary +*.pdf binary diff --git a/.github/actions/setup-conda-env/action.yml b/.github/actions/setup-conda-env/action.yml new file mode 100644 index 000000000..4867fd1b0 --- /dev/null +++ b/.github/actions/setup-conda-env/action.yml @@ -0,0 +1,32 @@ +name: Set up conda env +description: > + Install Miniconda and create/activate an EEG-ExPy conda environment from + the given env yml. Shared by the Test and Typecheck jobs so the two + don't drift apart. Environment name is not set in the yml files so local + installs can use any name they like. + +inputs: + environment-file: + required: true + description: Path to the conda environment yml file to install from. + activate-environment: + required: true + description: Name to give the created environment. + python-version: + required: false + description: > + Python version to pin (e.g. '3.8'). Overrides the version conda would + otherwise resolve from the environment file's constraints. When omitted, + conda resolves freely within the environment file's range. 
+ +runs: + using: composite + steps: + - uses: conda-incubator/setup-miniconda@v3 + with: + environment-file: ${{ inputs.environment-file }} + activate-environment: ${{ inputs.activate-environment }} + python-version: ${{ inputs.python-version }} + auto-activate-base: false + channels: conda-forge + miniconda-version: "latest" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 5f8953695..090a8b1f8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,29 +9,20 @@ on: jobs: build: runs-on: ubuntu-22.04 + defaults: + run: + shell: bash -el {0} steps: - name: Checkout repo uses: actions/checkout@v3 with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: 3.8 - - - name: Install dependencies - run: | - make install-deps-apt - python -m pip install --upgrade pip wheel - python -m pip install attrdict - - make install-deps-wxpython - - - name: Build project - run: | - make install-docs-build-dependencies + fetch-depth: 0 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env + with: + environment-file: environments/eeg-expy-docsbuild.yml + activate-environment: eeg-expy-docsbuild - name: Get list of changed files id: changes @@ -40,7 +31,6 @@ jobs: git diff --name-only origin/master...HEAD > changed_files.txt cat changed_files.txt - - name: Determine build mode id: mode run: | @@ -48,13 +38,13 @@ jobs: echo "FULL_BUILD=true" >> $GITHUB_ENV echo "Detected non-example file change. Full build triggered." 
else - CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' -) + # || true prevents grep's exit code 1 (no matches) from aborting the step + CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' - || true) echo "FULL_BUILD=false" >> $GITHUB_ENV echo "CHANGED_EXAMPLES=$CHANGED_EXAMPLES" >> $GITHUB_ENV echo "Changed examples: $CHANGED_EXAMPLES" fi - - name: Cache built documentation id: cache-docs uses: actions/cache@v4 @@ -65,15 +55,12 @@ jobs: restore-keys: | ${{ runner.os }}-sphinx- - - name: Build docs - run: | - make docs + run: make docs - - name: Deploy Docs uses: peaceiris/actions-gh-pages@v3 - if: github.ref == 'refs/heads/master' # TODO: Deploy seperate develop-version of docs? + if: github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/dev/') with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: doc/_build/html diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5b4f3c6aa..6afc1f6ee 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,15 +13,33 @@ jobs: defaults: run: shell: bash -el {0} + continue-on-error: ${{ matrix.experimental == true }} strategy: fail-fast: false matrix: - os: ['ubuntu-22.04', windows-latest, macOS-latest] - python_version: ['3.8'] + os: [ubuntu-22.04, windows-latest, macOS-latest] + python_version: ['3.10'] + env_file: [environments/eeg-expy-full.yml] + env_name: [eeg-expy-full] include: - # PsychoPy currently restricted to <= 3.10 + # Experimental Full Build: Catch regressions on 3.11 early - os: ubuntu-22.04 - python_version: '3.10' + python_version: '3.11' + experimental: true + env_file: environments/eeg-expy-full.yml + env_name: eeg-expy-full + + # Experimental Streaming Builds: Verify acquisition/analysis on 3.12+ + - os: ubuntu-22.04 + python_version: '3.12' + experimental: true + env_file: environments/eeg-expy-streaming.yml + env_name: eeg-expy-streaming + - os: ubuntu-22.04 + python_version: '3.13' + 
experimental: true + env_file: environments/eeg-expy-streaming.yml + env_name: eeg-expy-streaming steps: - uses: actions/checkout@v2 @@ -29,22 +47,13 @@ jobs: if: "startsWith(runner.os, 'Linux')" run: | make install-deps-apt - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: - environment-file: environments/eeg-expy-full.yml - auto-activate-base: false + environment-file: ${{ matrix.env_file }} + activate-environment: ${{ matrix.env_name }} python-version: ${{ matrix.python_version }} - activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" - - - name: Fix PsychXR numpy dependency DLL issues (Windows only) - if: matrix.os == 'windows-latest' - run: | - conda install --force-reinstall numpy - - - name: Run eegnb install test + - name: Run eeg-expy install test run: | if [ "$RUNNER_OS" == "Linux" ]; then Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & @@ -58,7 +67,7 @@ jobs: Xvfb :0 -screen 0 1024x768x24 -ac +extension GLX +render -noreset &> xvfb.log & export DISPLAY=:0 fi - make test PYTEST_ARGS="--ignore=tests/test_run_experiments.py" + make test typecheck: @@ -71,19 +80,16 @@ jobs: fail-fast: false matrix: os: ['ubuntu-22.04'] - python_version: [3.9] + python_version: ['3.10'] steps: - uses: actions/checkout@v2 - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: environment-file: environments/eeg-expy-full.yml - auto-activate-base: false - python-version: ${{ matrix.python_version }} activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" + python-version: ${{ matrix.python_version }} - name: Typecheck run: | make typecheck diff --git a/.gitignore b/.gitignore index 8ce78c6c7..40e3c80c4 100644 --- a/.gitignore +++ b/.gitignore @@ -6,10 +6,15 @@ __pycache__ # Built as part of docs 
doc/auto_examples doc/_build +doc/generated/ +doc/sg_execution_times.rst # Built by auto_examples examples/visual_cueing/*.csv +# Jupytext-paired notebooks (the .py is the source of truth; .ipynb is auto-paired) +**.ipynb + # tests/coverage artifacts .coverage coverage.xml @@ -18,4 +23,4 @@ htmlcov # PyCharm .idea/ -**/.DS_Store \ No newline at end of file +**/.DS_Store diff --git a/conftest.py b/conftest.py new file mode 100644 index 000000000..649b2f0ef --- /dev/null +++ b/conftest.py @@ -0,0 +1,19 @@ +import importlib.util + + +def _is_available(module_name: str) -> bool: + try: + return importlib.util.find_spec(module_name) is not None + except (ImportError, ValueError): + return False + + +collect_ignore: list[str] = [] + +if not _is_available("psychopy"): + collect_ignore += [ + "eegnb/experiments", + "eegnb/devices/vr.py", + ] +elif not _is_available("psychxr"): + collect_ignore += ["eegnb/devices/vr.py"] diff --git a/doc/conf.py b/doc/conf.py index c449ea6fc..4982fc9ff 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -255,9 +255,9 @@ def setup(app): # Configurations for sphinx gallery -sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', - 'examples_dirs': ['../examples','../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], - 'gallery_dirs': ['auto_examples','auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], +sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', + 'examples_dirs': ['../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], + 'gallery_dirs': ['auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], 'within_subsection_order': FileNameSortKey, 'default_thumb_file': 
'img/eeg-notebooks_logo.png', 'backreferences_dir': 'generated', # Where to drop linking files between examples & API diff --git a/doc/experiments/vprvep.rst b/doc/experiments/vprvep.rst new file mode 100644 index 000000000..fe309f475 --- /dev/null +++ b/doc/experiments/vprvep.rst @@ -0,0 +1,337 @@ +Visual Pattern Reversal VEP +=========================== + +The Pattern Reversal VEP (PR-VEP) is the most widely studied visual +evoked potential paradigm. A checkerboard pattern swaps its black and +white squares at a regular rate (typically 2 reversals per second) while +the participant fixates on a central cross. Each reversal elicits a +stereotyped triphasic waveform whose most prominent feature is the +**P100**, a positive deflection occurring ~100 ms after the reversal at +midline occipital electrodes. The other components are a smaller N75 +before it and an N145 after it. + +This implementation runs both ISCEV check-size variants in a single +session (large + small) and supports stereoscopic monocular presentation +through a Meta Quest HMD via PsychoPy / psychxr / Meta-Link. Trials are +analysed under two reference schemes (Oz−Fz ISCEV-standard and linked +mastoid M1+M2) and a wide range of differential biomarkers are reported, +including inter-ocular latency difference (IOLD), check-size slope +difference, hemispheric asymmetry, inter-ocular Δ-asymmetry contrasts, +lateral extrastriate readouts at P7/P8, and bootstrap confidence +intervals on the P100 latency. + +**PR-VEP Experiment Notebook Examples:** + +.. include:: ../auto_examples/visual_vep/index.rst + + +Running the Experiment +---------------------- + +.. 
code-block:: python + + from eegnb.devices.eeg import EEG + from eegnb.experiments.visual_vep import VisualPatternReversalVEP + + eeg = EEG(device='cyton') + experiment = VisualPatternReversalVEP( + eeg=eeg, + save_fn='my_vep_recording.csv', + block_duration_seconds=50, + block_trial_size=100, + reps_per_condition=2, # 4 conditions × 2 reps = 8 blocks total + use_vr=True, # False for monitor mode + ) + experiment.run() + +The display refresh rate is auto-detected from the active window +(``displayRefreshRate`` in VR, ``getActualFrameRate()`` on a monitor) and +must be an integer multiple of the 2 reversals/sec rate; 60, 90, 120, and +144 Hz are all supported. + + +Participant Preparation +----------------------- + +The PR-VEP is sensitive to the optical quality of the retinal image. +Participants who normally wear glasses or contact lenses **must** wear +their corrective lenses during the test. Uncorrected refractive error +blurs the checkerboard's high spatial frequency edges, which attenuates +the P100 amplitude and can increase its latency — mimicking a genuine +neural conduction delay. This is especially important when comparing +latencies between eyes or across sessions, and even more so for the +small-check (high spatial-frequency) variant where blur dominates. + +ISCEV guidelines require that visual acuity be documented for each +recording session. If a participant's corrected acuity is worse than +6/9 (20/30), note it alongside the data so that downstream analysis can +account for it. + + +Stimulus Parameters +------------------- + +Two ISCEV check-size variants are presented per session [Odom2016]_: + +- **Large checks**: 1.0° of visual angle (60 arcmin, 0.5 cpd) — the + standard clinical PR-VEP stimulus, dominant low-spatial-frequency + drive of the foveal P100. +- **Small checks**: 0.25° of visual angle (15 arcmin, 2.0 cpd) — the + high-spatial-frequency variant. 
Demyelinated optic-nerve fibres + preferentially delay this response, so the latency *difference* + between large- and small-check P100 amplifies subtle demyelination + that the large-check IOLD alone misses. + +Other parameters: + +- **Reversal rate**: 2 reversals per second (ISCEV standard, range + 1–3 rev/s) [Odom2016]_ — fast enough for a discrete transient P100, + slow enough to stay well below photosensitive-seizure trigger + frequencies. +- **Field size**: 16° square (``ISCEV_FIELD_DEG = 16.0``), rendered at + the runtime-derived pixels-per-degree of the active display. +- **Contrast**: high-contrast black/white, mean luminance held constant + through the inter-trial interval to avoid adaptation. +- **Fixation**: thin red cross, 0.25° × 0.05° (15 × 3 arcmin), sized in + pixels via ``ppd`` so it stays sub-check at every variant and does not + mask foveal stimulation. + +Block scheduling: 4 conditions (left/right eye × large/small check) × +``reps_per_condition`` repetitions, shuffled at construction time. With +the default ``reps_per_condition=2`` this gives 8 blocks of 50 s, ~100 +reversals per block, ~200 reversals per (eye × size) condition, and +~400 reversals per eye total. + +Block-start markers (100, 101, 102, 103) are pushed at the first +reversal of each block, encoding the full condition (bit 0 = eye, bit 1 += size class). Per-reversal markers are ``1`` for left-eye blocks and +``2`` for right-eye blocks. Both decode to the per-condition labels in +the analysis pipeline. + + +Monitor vs VR +------------- + +The experiment supports both standard monitor presentation and Meta +Quest (VR) presentation via ``use_vr=True``. + +**VR mode is preferred** for several reasons: + +- Each eye sees the checkerboard independently via per-eye HMD buffers, + so monocular blocks need no manual eye closure and have no light + leakage from the unstimulated eye. 
+- The OpenXR / LibOVR compositor supplies a per-frame predicted photon + time (``app_motion_to_photon_latency_s``), which is logged to the + ``_timing.csv`` sidecar and applied trial-by-trial in the analysis to + cancel most of the output-side display latency (render queue, + compositor buffering, scan-out, HMD persistence). +- A keyboard (spacebar) or Quest controller trigger is required to advance + past block instructions and to exit the experiment. The presented + stimulus is otherwise hands-free during the block — there is no manual + eye covering required. + +A residual unmeasured chain delay (Quest Link transport + panel scan-out ++ LCD response + Cyton RF) of ~25 ± 15 ms remains, and is treated as a +fixed ``link_panel_lag`` constant in the analysis. Differential +biomarkers (IOLD, check-size slope difference, amplitude ratios, +Δ-asymmetry contrasts) are robust to this residual because both eyes +share the same chain — the residual cancels in any L−R contrast. +*Absolute* P100 latency (vs clinical norms) does *not* survive this +residual and is not reported as a clinical number. + +A separate stimulus calibration caveat applies to VR: the Quest panel is +not photometrically calibrated to a specific cd/m² or contrast ratio, so +absolute P100 *amplitude* will diverge from clinical norms by an unknown +constant factor. As with the latency offset, differential / within- +subject biomarkers remain interpretable; absolute amplitudes are not +interchangeable with clinical norms. + +In monitor mode the software flip is the only timing source, so any +fixed display-pipeline latency must be characterised separately if +absolute latency is needed. 
The most accurate option is a photodiode +taped over a sync patch on the screen with its analogue output routed +into a spare Cyton input (auxiliary channel or a free EEG channel) — +the diode fires when actual photons arrive at the screen, so epoching +off the diode trace aligns trials to true stimulus onset with sub-frame +precision and removes the entire display-chain residual. + + +Electrode Placement +------------------- + +The P100 is generated in occipital cortex. The default cap montage in +``00x__pattern_reversal_run_experiment.py`` is an 8-channel OpenBCI +Cyton layout: + +.. code-block:: python + + ch_names = ["Fz", "Pz", "P7", "P8", "O1", "O2", "Oz", "M2"] + # Reference: M1 (Cyton SRB / hardware reference) + # Ground: A2 + +Channel roles in the analysis pipeline: + +1. **Oz** — the primary ISCEV electrode; highest-amplitude P100. All + single-channel biomarkers (IOLD, check-size slope, amplitude ratio, + bootstrap CI) are computed at Oz. +2. **O1, O2** — lateral occipital. Used for the hemispheric-asymmetry + biomarker, but interpretation is confounded by paradoxical + lateralization (V1 dipoles in the calcarine fold project across the + midline). The inter-ocular Δ-asymmetry contrast separates eye- + dependent skew from stationary anatomical / electrode-stationary + asymmetries. +3. **P7, P8** — lateral parieto-occipital electrodes. Because they largely + bypass the "paradoxical lateralization" seen at O1/O2, they provide a + cleaner, more direct readout of each hemisphere independently. They are + essential for side-localizing cortical asymmetry and drive three biomarkers: + lateral propagation latency (7), P7/P8 hemispheric asymmetry (8), and the + lateral-hemisphere composite (9). +4. **Pz** — parietal midline. Used in the topology QC check to confirm + the expected ``Oz > O1/O2 > P7/P8/Pz`` posterior gradient. +5. **Fz** — frontal midline. 
Doubles as (a) the reference electrode for + the ISCEV Oz−Fz scheme, and (b) when the linked-mastoid scheme is + active, the Halliday polarity-inversion check: a genuine V1-generated + P100 produces an *inverted* (negative) deflection at Fz at the same + latency, confirming a posterior dipole. +6. **M1 (reference) + M2** — mastoids. M1 is the Cyton SRB hardware + reference (zero by construction). M2 is recorded; together they form + the linked-mastoid (M1+M2)/2 reference scheme used alongside Oz−Fz. + +For the alternative ``mark-iv`` montage (Thinkpulse dry actives at 4× +gain), edit ``ch_names`` in ``00x__pattern_reversal_run_experiment.py`` +to the Thinkpulse channel set. + + +Reference Schemes +----------------- + +The analysis pipeline runs every biomarker under two reference schemes +in sequence and prints both: + +- **Oz − Fz (ISCEV standard)** — directly comparable with clinical + PR-VEP norms. Sensitive to Fz contact quality. +- **Linked mastoid (M1+M2)/2** — more stable when Fz contact is weak, + unaffected by frontal EMG, and the only scheme under which the Fz + Halliday polarity-inversion check is meaningful (Fz is zero by + construction under the Oz−Fz reference). + +Each scheme produces its own set of plots and biomarkers; agreement +between the two is itself a quality indicator. + + +Biomarkers +---------- + +The analysis script ``01r__pattern_reversal_viz.py`` reports several differential biomarkers, in addition to the per-condition P100 latency and amplitude at Oz. These are grouped by their primary clinical utility: + +**Remyelination & Longitudinal Tracking (Demyelination Indicators)** +These metrics are the highest value for tracking repair because they isolate pathway delays and cancel out daily systemic noise or anatomical distortions. + +1. **Inter-ocular latency difference (IOLD)** — signed ``L − R`` P100 latency at Oz. 
``> 8 ms`` is the most-cited clinical threshold for unilateral demyelination; robust to the ``link_panel_lag`` residual because both eyes share the same chain. The gold standard for longitudinal tracking of optic nerve repair. +2. **Inter-ocular check-size slope difference** — demyelinated (and repairing) optic nerves preferentially delay the small-check response. The ``L_eye − R_eye`` difference of latency-vs-check-size slopes amplifies asymmetric demyelination and is an excellent leading indicator of repair. +5. **Inter-ocular Δ-asymmetry contrast** — ``(O1−O2)|L_eye − (O1−O2)|R_eye``. By subtracting the spatial asymmetry between eyes, it cancels stationary anatomical distortions (e.g., asymmetrical cortical folding). The remaining Δ strictly represents pathway-driven changes over time. +6. **Bootstrap confidence intervals on P100 / IOLD** — 1000 trial-resamples per eye recompute the trimmed-mean P100 location, giving a 95% CI. The IOLD CI is flagged separately for excluding zero (statistically separable) and excluding ±8 ms (clinically meaningful). Tracking this CI width proves remyelination is statistically real. +7. **Lateral extrastriate P100 (P7 / P8) & Propagation** — per-channel P100 detection plus the Oz → lateral propagation latency (``P7−Oz``, ``P8−Oz``). While IOLD tracks *optic nerve* myelination, this tracks *intracortical* white matter myelination. A severe delay here indicates a cortical white matter issue (e.g., TBI, concussion, or cortical demyelination), not an optic nerve problem. + +**Axonal & Compressive Indicators** +While latency measures myelin, amplitude measures the surviving nerve fibers (axons). + +3. **Inter-ocular amplitude ratios** — ``L / R`` P100 amplitude at Oz. Tracks axonal loss (e.g., after severe optic neuritis) or compressive lesions (e.g., tumor blocking the signal). Less specific than latency but critical for differentiating slow recovery from permanent damage. 
+- **N75–P100–N145 Peak-to-Peak Amplitude** — measures the total "energy" of the visual response, which is more stable than absolute voltage against baseline drift. + +**Morphological & Cortical Indicators** +Focus on waveform shape and direct hemispheric comparisons. + +4. **Hemispheric asymmetry (O1 vs O2)** — per-eye P100 latency/amplitude at O1 and O2. Due to paradoxical lateralization, a deficit at O1 (left scalp) suggests *right* hemisphere involvement. +8. **Lateral extrastriate asymmetry (P7 vs P8)** — direct hemispheric read without the paradoxical lateralization of V1. Flags stationary asymmetry vs. eye-dependent asymmetry. +9. **Combined lateral hemisphere composites** — ``(O1+P7)/2`` vs ``(O2+P8)/2`` traces. Provides ~√2 better SNR than O1/O2 alone. +- **W-Peak (Bifurcated P100) Analysis** — detects if the P100 splits into two peaks (a "W" shape), which often indicates a central scotoma (macular dysfunction) or severe uncorrected refractive error causing spatial desynchronization. + +**Quality Control** + +10. **Topology QC** (linked-mastoid only) — confirms (a) the ``Oz > Pz > 0`` posterior-gradient at the Oz P100 latency, and (b) the Halliday frontal polarity inversion (Fz negative at the Oz P100 latency) which strongly confirms a true V1 generator. + +All biomarkers are computed twice (once per reference scheme) and printed alongside their expected normal ranges, so a session's clinical interpretability is visible at a glance. + + +Latency Resolution +------------------ + +The precision of a P100 latency estimate depends on three factors: + +1. **Display refresh rate** — determines the worst-case stimulus timing + jitter. At 120 Hz this is ~4.2 ms per frame; at 60 Hz, ~16.7 ms. +2. **EEG sampling rate** — the Cyton samples at 250 Hz, giving 4 ms + between samples. Without interpolation, the peak latency is locked + to the nearest sample and cannot resolve shifts smaller than 4 ms. +3. 
**Number of trials** — averaging more reversals reduces noise in the + ERP waveform, tightening the bootstrap CI around the peak estimate. + The default 8-block design yields ~400 reversals per eye (split + across the two check sizes). + +To achieve sub-sample precision, ``vep_utils.get_pr_vep_latencies`` +fits a parabola through the peak sample and its two neighbours and +takes the vertex as the true peak — bringing effective resolution to +~0.5 ms at 250 Hz, well below the sample interval. + +For studies that require detecting latency shifts of 1–2 ms (e.g. +within-subject longitudinal comparisons), the combination of 120 Hz +display, parabolic interpolation, and the default 8-block design is +recommended. The trimmed-mean evoked estimator (used throughout the +analysis) further reduces sensitivity to occasional blink-contaminated +trials that survive amplitude rejection. + + +Longitudinal Tracking +--------------------- + +To monitor P100 latency over time — for example during nerve recovery +or longitudinal intervention tracking — record multiple sessions using +the same subject and session numbering scheme. + +The analysis pipeline is split into two scripts so this is fast: + +- ``01r__pattern_reversal_viz.py`` runs the per-session analysis. In + addition to the figures and stdout output, it writes a + ``biomarkers.json`` file into the recording directory containing + every biomarker under both reference schemes plus session metadata + (subject, session, device, site, trial counts, PC-side and + ``link_panel_lag`` timing values). +- ``02r__pattern_reversal_longitudinal.py`` reads every session's + ``biomarkers.json`` for a given subject, builds a flattened + ``pandas.DataFrame``, and plots IOLD, per-eye P100 latency, + check-size slope difference, O1/O2 and P7/P8 Δ-asymmetry, and + amplitude ratio as a function of session. Bootstrap 95% CI bands are + drawn on the IOLD and per-eye P100 panels so a real shift is visually + separable from session noise. 
+ +This split means new longitudinal points are added in seconds (read +JSON, plot) rather than minutes (re-load EEG, filter, epoch, +bootstrap), and individual sessions can be re-analysed without +invalidating the rest of the trend series. + +Before attributing a latency change to an intervention (like remyelination), establish a **baseline**: record at least 3–5 sessions over 1–2 weeks under the same conditions. Latency shifts can be caused by many non-pathological factors, including subject fatigue, alertness, caffeine intake, body temperature changes, or ocular differences (like pupil size or slight changes in focus/fixation). The longitudinal script supports a ``BASELINE_LAST_SESSION_NB`` cutoff that limits the summary statistics to those initial sessions, capturing the natural day-to-day variability for your setup and participant. Subsequent sessions need to fall significantly outside this baseline variability (e.g., beyond the ``mean ± std``) to be considered a true physiological or structural shift. + + +Timing Notes +------------ + +Two sidecar files are written alongside each recording to let you check +timing after the fact: + +- ``{save_fn}_timing.csv`` — per-trial software and compositor + timestamps and their delta, including the per-trial + ``app_motion_to_photon_latency_s`` used by the analysis. +- ``{save_fn}_frame_stats.json`` — per-frame intervals and dropped- + frame count (150%-of-refresh threshold). + + + + +References +---------- + +.. [Odom2016] Odom JV, Bach M, Brigell M, Holder GE, McCulloch DL, Mizota A, + Tormene AP; International Society for Clinical Electrophysiology of Vision. + **ISCEV standard for clinical visual evoked potentials: (2016 update).** + *Documenta Ophthalmologica* 133(1):1-9. 
doi:10.1007/s10633-016-9553-y diff --git a/doc/getting_started/installation.rst b/doc/getting_started/installation.rst index c4bcda118..e29352af5 100644 --- a/doc/getting_started/installation.rst +++ b/doc/getting_started/installation.rst @@ -44,7 +44,7 @@ Use the following commands to download the repo, create and activate a conda or **Environment file options** - *Python 3.8 - 3.10:* + *Python 3.10:* - `eeg-expy-full`: Install all dependencies @@ -52,7 +52,7 @@ Use the following commands to download the repo, create and activate a conda or - `eeg-expy-streamstim`: Combined streaming and stimulus presentation - *Python 3.8 - 3.13:* + *Python 3.10 - 3.13:* - `eeg-expy-docsbuild`: Documentation @@ -68,7 +68,7 @@ Use the following commands to download the repo, create and activate a conda or # Create conda environment from chosen eeg-expy-*.yml # The Python version will be pinned by the environment file - conda env create -n eeg-expy --file=environments/eeg-expy-full.yml + conda env create -n eeg-expy -f environments/eeg-expy-full.yml # Activate the environment conda activate eeg-expy diff --git a/doc/getting_started/streaming.md b/doc/getting_started/streaming.md index f28ebde87..53792b74e 100644 --- a/doc/getting_started/streaming.md +++ b/doc/getting_started/streaming.md @@ -63,6 +63,9 @@ be run to begin the notebooks interfacing with the bluemuse backend. **Needed Parameters:** **Optional Parameters:** +**Cyton USB-Serial Latency (Windows):** +A note on Cyton USB-serial latency: the Windows FTDI driver default `LatencyTimer` of 16 ms causes batched marker delivery and corrupts millisecond-grade marker timing. The experiment scripts assert `LatencyTimer = 1 ms` at startup; recordings predating this assertion contain a ~15 ms hardware buffering lag on the marker channel that must be subtracted before comparing absolute latencies across sessions. 
+ ### OpenBCI Cyton + Daisy ![fig](../img/cyton_daisy.png) **Device Name:** *'cyton_daisy'* or *'cyton_daisy_wifi'* with WiFi Shield diff --git a/doc/index.rst b/doc/index.rst index 278093db3..ccdd6f85c 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -19,6 +19,7 @@ experiments/vn170 experiments/vp300 experiments/vssvep + experiments/vprvep experiments/cueing experiments/gonogo experiments/all_examples diff --git a/eegnb/__init__.py b/eegnb/__init__.py index 02ebd828f..a59bd0949 100644 --- a/eegnb/__init__.py +++ b/eegnb/__init__.py @@ -32,7 +32,7 @@ def _get_recording_dir( """A subroutine of get_recording_dir that accepts subject and session as strings""" # folder structure is /DATA_DIR/experiment/board_name/site/subject/session/*.csv recording_dir = ( - Path(data_dir) / experiment / site / board_name / subject_str / session_str + Path(data_dir or DATA_DIR) / experiment / site / board_name / subject_str / session_str ) # check if directory exists, if not, make the directory diff --git a/eegnb/analysis/__init__.py b/eegnb/analysis/__init__.py index e69de29bb..2fde929d0 100644 --- a/eegnb/analysis/__init__.py +++ b/eegnb/analysis/__init__.py @@ -0,0 +1 @@ +from eegnb.analysis import vep_utils # noqa: F401 diff --git a/eegnb/analysis/recording_quality.py b/eegnb/analysis/recording_quality.py new file mode 100644 index 000000000..60cb091b6 --- /dev/null +++ b/eegnb/analysis/recording_quality.py @@ -0,0 +1,180 @@ +"""Per-recording contact quality diagnostic for an eegnb session directory. + +Can be used programmatically (returns a formatted string or structured dict) +or from the command line via ``tools/recording_quality.py``. 
+""" +import pathlib + +import numpy as np +import pandas as pd + + +EEG_CHANNEL_CANDIDATES = [ + 'Fz', 'Pz', 'P7', 'P8', 'O1', 'O2', 'Oz', 'M1', 'M2', + 'Cz', 'C3', 'C4', 'F3', 'F4', 'T7', 'T8', 'F7', 'F8', + 'AF7', 'AF8', 'TP9', 'TP10', +] + +BIG_SAMPLE_UV = 200.0 +DRIFT_WIN_SECS = 10 +NOISE_FACTOR_FLAG = 1.5 + + +def _detect_sfreq(timestamps: np.ndarray) -> float: + diffs = np.diff(timestamps) + diffs = diffs[(diffs > 0) & (diffs < 1.0)] + return float(1.0 / np.median(diffs)) if len(diffs) else 250.0 + + +def _channel_metrics(x: np.ndarray, sfreq: float) -> dict: + x = x - np.mean(x) + win = max(1, int(DRIFT_WIN_SECS * sfreq)) + drift = float(pd.Series(x).rolling(win, center=False).mean().dropna().pipe( + lambda s: s.max() - s.min() + )) if len(x) >= win else 0.0 + return { + 'std': float(np.std(x)), + 'p99': float(np.percentile(np.abs(x), 99)), + 'max': float(x.max()), + 'min': float(x.min()), + 'drift': drift, + 'pct_big': 100.0 * float(np.mean(np.abs(x) > BIG_SAMPLE_UV)), + } + + +def check_session(session_dir: pathlib.Path) -> dict: + """Return structured quality metrics for a session directory. + + Reads every ``recording_*.csv`` (excluding ``*_timing.csv``) and computes + per-channel std / p99 / drift metrics. 
+ + Returns a dict with keys: + report : str — formatted text report (same as report_session()) + flagged_channels : list — channel names flagged in any recording + (std or drift > 1.5× group median) + shared_ref_suspect : bool — True when ≥ half of channels are flagged, + indicating M1/SRB loose rather than isolated + electrode contacts + """ + session_dir = pathlib.Path(session_dir).expanduser().resolve() + recs = sorted( + p for p in session_dir.glob('recording_*.csv') + if not p.stem.endswith('_timing') and not p.name.endswith('.excluded') + ) + + lines = [] + w = lines.append + + if not recs: + w(f'No recording_*.csv files found in {session_dir}') + return {'report': '\n'.join(lines), 'flagged_channels': [], 'shared_ref_suspect': False} + + w(f'Session: {session_dir.name}') + w(f'Found {len(recs)} recording(s)') + w('') + w('Column legend (all values in µV, raw signal — compare channels relative to each other):') + w(f' std Standard deviation — overall noise level per channel.') + w(f' p99|x| 99th percentile of |signal| — sustained noise floor, robust to single spikes.') + w(f' max/min Largest/smallest raw sample — flags electrode pops or movement artifacts.') + w(f' drift Range of 10 s rolling mean — slow DC wander from drying gel or loose contact.') + w(f' %>200 % of samples exceeding {BIG_SAMPLE_UV:.0f} µV — mainly useful comparing recordings, not channels.') + w('') + w('Interpreting the table: look for channels with std or drift significantly higher than') + w('the others. A single outlier = bad electrode contact. 
All channels inflated = loose reference.') + w('') + + rows = [] + flagged_channels: set = set() + + for p in recs: + df = pd.read_csv(p) + eeg_chs = [c for c in df.columns if c in EEG_CHANNEL_CANDIDATES] + sfreq = _detect_sfreq(df['timestamps'].values) if 'timestamps' in df else 250.0 + rec_min = len(df) / sfreq / 60.0 + w(f'=== {p.stem.split("recording_")[-1]} ' + f'({rec_min:.1f} min, {len(df)} samples, ~{sfreq:.0f} Hz) ===') + + ch_metrics = {} + for ch in eeg_chs: + ch_metrics[ch] = _channel_metrics(df[ch].values.astype(float), sfreq) + rows.append({'rec': p.stem.split('recording_')[-1], 'ch': ch, **ch_metrics[ch]}) + + med_std = float(np.median([m['std'] for m in ch_metrics.values()])) + med_drift = float(np.median([m['drift'] for m in ch_metrics.values()])) + + w(f' {"ch":>4} {"std":>8} {"std×":>5} {"p99|x|":>8} ' + f'{"max":>8} {"min":>8} {"drift":>8} {"drift×":>6} {"%>{:.0f}".format(BIG_SAMPLE_UV):>6}') + for ch, m in ch_metrics.items(): + std_x = m['std'] / med_std if med_std > 0 else 1.0 + drift_x = m['drift'] / med_drift if med_drift > 0 else 1.0 + flagged = std_x > NOISE_FACTOR_FLAG or drift_x > NOISE_FACTOR_FLAG + if flagged: + flagged_channels.add(ch) + flag_str = '⚑' if flagged else ' ' + w(f' {ch:>4} {m["std"]:>8.1f} {std_x:>5.2f} {m["p99"]:>8.1f} ' + f'{m["max"]:>8.1f} {m["min"]:>8.1f} ' + f'{m["drift"]:>8.1f} {drift_x:>6.2f} {m["pct_big"]:>6.2f} {flag_str}') + w(f' (median std={med_std:.1f} µV median drift={med_drift:.1f} µV ' + f'flag threshold: ×>{NOISE_FACTOR_FLAG})') + + # Shared-reference diagnosis per recording: if ≥ half of channels are + # flagged the noise is uniform, pointing at a loose M1/SRB rather than + # isolated electrode contacts. 
+ n_flagged_rec = sum( + 1 for m in ch_metrics.values() + if (m['std'] / med_std if med_std > 0 else 1.0) > NOISE_FACTOR_FLAG + or (m['drift'] / med_drift if med_drift > 0 else 1.0) > NOISE_FACTOR_FLAG + ) + if n_flagged_rec >= len(eeg_chs) / 2: + w(f' ⚑ SHARED REFERENCE SUSPECT — {n_flagged_rec}/{len(eeg_chs)} channels flagged ' + f'(all-channel inflation → M1/SRB loose)') + elif n_flagged_rec: + w(f' ⚑ {n_flagged_rec} channel(s) flagged — isolated contact issue(s)') + w('') + + df_all = pd.DataFrame(rows) + summary = df_all.groupby('rec').agg( + med_std=('std', 'median'), + med_p99=('p99', 'median'), + med_drift=('drift', 'median'), + ) + + w('=== PER-RECORDING SUMMARY (median across EEG channels) ===') + w(f' {"rec":<22} {"med_std":>8} {"med_p99":>8} {"med_drift":>10}') + for rec in summary.index: + s = summary.loc[rec] + w(f' {rec:<22} {s.med_std:>8.1f} {s.med_p99:>8.1f} {s.med_drift:>10.1f}') + + w('') + w('=== EXCLUSION CANDIDATES ===') + baseline = float(summary.med_std.min()) + any_exclude = False + for rec in summary.index: + factor = float(summary.loc[rec, 'med_std']) / baseline + flag = factor > NOISE_FACTOR_FLAG + any_exclude |= flag + w(f' {rec}: median_std={summary.loc[rec,"med_std"]:.1f} ' + f'({factor:.2f}x) [{"EXCLUDE?" if flag else "ok"}]') + + w('') + if any_exclude: + w('⚑ One or more recordings flagged as substantially noisier.') + + # Determine overall shared-reference suspicion across the whole session + all_ch_names = list(dict.fromkeys(r['ch'] for r in rows)) # ordered unique + shared_ref_suspect = len(flagged_channels) >= len(all_ch_names) / 2 + + return { + 'report': '\n'.join(lines), + 'flagged_channels': sorted(flagged_channels), + 'shared_ref_suspect': shared_ref_suspect, + } + + +def report_session(session_dir: pathlib.Path) -> str: + """Return a formatted quality report string for a session directory. 
+ + Reads every ``recording_*.csv`` (excluding ``*_timing.csv``) and reports + per-channel std / p99 / drift, plus exclusion candidates. + """ + return check_session(session_dir)['report'] diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py index d9450981d..e7233ea76 100644 --- a/eegnb/analysis/utils.py +++ b/eegnb/analysis/utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy from copy import deepcopy import math @@ -5,10 +7,12 @@ import sys from collections import OrderedDict from glob import glob -from typing import Union, List -from time import sleep, time -import os +from pathlib import Path +from typing import TYPE_CHECKING, Union, List +if TYPE_CHECKING: + from eegnb.devices.eeg import EEG +from time import sleep, time import pandas as pd import numpy as np import matplotlib.pyplot as plt @@ -23,9 +27,11 @@ from scipy.signal import lfilter, lfilter_zi from eegnb import _get_recording_dir -from eegnb.devices.eeg import EEG from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS -from pynput import keyboard +try: + from pynput import keyboard +except ImportError: + keyboard = None # this should probably not be done here sns.set_context("talk") @@ -181,9 +187,12 @@ def load_data( session_str = "*" if session == "all" else f"session{session_int:03}" recdir = _get_recording_dir(device_name, experiment, subject_str, session_str, site, data_dir) - data_path = os.path.join(data_dir, recdir, "*.csv") + data_path = recdir / "*.csv" - fnames = glob(str(data_path)) + # Primary recordings are named recording_.csv (one underscore). + # Sidecar files follow BIDS convention with an additional suffix, e.g. recording__timing.csv. + # Filter to only primary recordings. 
+ fnames = [f for f in glob(str(data_path)) if Path(f).stem.count('_') == 1] if len(fnames) == 0: raise Exception("No filenames found in folder: %s" %data_path) diff --git a/eegnb/analysis/vep_utils.py b/eegnb/analysis/vep_utils.py new file mode 100644 index 000000000..605da9625 --- /dev/null +++ b/eegnb/analysis/vep_utils.py @@ -0,0 +1,386 @@ +import numpy as np +from mne import Evoked, EvokedArray +from scipy.stats import trim_mean + +ISCEV_CHECK_DEG_LARGE = 1.0 +ISCEV_CHECK_DEG_SMALL = 0.25 + +# Default thresholds used by clinical PR-VEP biomarkers. Override per-call +# only if you have a paradigm-specific reason to. +IOLD_FLAG_MS = 8.0 # |L − R P100 latency| above this flags suspected unilateral demyelination +LOG2_AMP_FLAG = 1.0 # |log2(L/R P100 amplitude)| above this flags inter-ocular drive imbalance + + +def print_latency(peak_name, peak_latency, peak_channel, uv): + peak_latency = round(peak_latency * 1e3, 2) # convert to milliseconds + uv = round(uv * 1e6, 2) # convert to µV + print('{} Peak of {} µV at {} ms in peak_channel {}'.format(peak_name, uv, peak_latency, peak_channel)) + + +def get_peak(erp_name, evoked_potential, peak_time_min, peak_time_max, mode): + """Find peak latency with sub-sample precision using parabolic interpolation. + + MNE's get_peak returns the sample with the largest value, limiting + resolution to the sample interval (4 ms at 250 Hz). A parabolic fit + through the peak sample and its two neighbours recovers the true peak + location between samples, giving ~0.5 ms precision at 250 Hz. + + This implementation avoids MNE's strict positive/negative threshold + requirements to support waveforms with large baseline shifts. 
+ """ + # Step 1: find the sample-level peak + time_mask = (evoked_potential.times >= peak_time_min) & (evoked_potential.times <= peak_time_max) + if not np.any(time_mask): + print(f'{erp_name}: no samples in window {peak_time_min}-{peak_time_max}') + return None + + data_win = evoked_potential.data[:, time_mask] + if mode == 'pos': + ch_idx, t_idx_win = np.unravel_index(np.argmax(data_win), data_win.shape) + elif mode == 'neg': + ch_idx, t_idx_win = np.unravel_index(np.argmin(data_win), data_win.shape) + else: + ch_idx, t_idx_win = np.unravel_index(np.argmax(np.abs(data_win)), data_win.shape) + + peak_channel = evoked_potential.ch_names[ch_idx] + peak_sample = np.where(time_mask)[0][t_idx_win] + sample_latency = evoked_potential.times[peak_sample] + + # Step 2: parabolic interpolation around the peak sample + times = evoked_potential.times + data = evoked_potential.data[ch_idx] + + # Need at least one sample on each side for the fit + if 0 < peak_sample < len(times) - 1: + y_prev = data[peak_sample - 1] + y_peak = data[peak_sample] + y_next = data[peak_sample + 1] + + # Parabolic interpolation: offset from centre sample + denom = y_prev - 2 * y_peak + y_next + if abs(denom) > 1e-30: + offset = 0.5 * (y_prev - y_next) / denom + dt = times[peak_sample] - times[peak_sample - 1] + interp_latency = times[peak_sample] + offset * dt + interp_uv = y_peak - 0.25 * (y_prev - y_next) * offset + else: + interp_latency = sample_latency + interp_uv = y_peak + else: + interp_latency = sample_latency + interp_uv = data[peak_sample] + + return { + 'name': erp_name, + 'latency': interp_latency, + 'channel': peak_channel, + 'amplitude': interp_uv + } + + +def get_pr_vep_latencies(evoked_occipital: Evoked): + n75 = get_peak(erp_name='N75', evoked_potential=evoked_occipital, + peak_time_min=0.060, peak_time_max=0.090, mode='neg') + p100 = get_peak(erp_name='P100', evoked_potential=evoked_occipital, + peak_time_min=0.080, peak_time_max=0.130, mode='pos') + n145 = 
get_peak(erp_name='N145', evoked_potential=evoked_occipital, + peak_time_min=0.120, peak_time_max=0.170, mode='neg') + + return n75, p100, n145 + + +# --------------------------------------------------------------------------- +# Robust averaging + JSON helper +# --------------------------------------------------------------------------- + +def trimmed_average(epochs, proportiontocut=0.1): + """Per-sample trimmed-mean evoked across trials. + + Standard ``epochs.average()`` is biased by occasional single-trial outliers + that survive amplitude-based rejection. Dropping the top and bottom + ``proportiontocut`` fraction at each timepoint produces a more + representative waveform without raising the rejection threshold or + discarding whole epochs. + """ + data = epochs.get_data() # (n_trials, n_channels, n_times) + if data.shape[0] == 0: + return epochs.average() + avg = trim_mean(data, proportiontocut=proportiontocut, axis=0) + nave = int(round(data.shape[0] * (1 - 2 * proportiontocut))) + return EvokedArray(avg, epochs.info, tmin=epochs.tmin, nave=nave, + comment=f'trimmed-mean ({int(proportiontocut * 100)}%)') + + +def json_safe_float(x): + """Coerce numpy scalars / None / NaN / Inf to JSON-safe Python floats.""" + if x is None: + return None + try: + v = float(x) + except (TypeError, ValueError): + return None + if np.isnan(v) or np.isinf(v): + return None + return v + + +# --------------------------------------------------------------------------- +# PR-VEP biomarkers (pure computation; printing/plotting stays in callers) +# --------------------------------------------------------------------------- + +def compute_iold(p100_left, p100_right, flag_threshold_ms=IOLD_FLAG_MS): + """Inter-ocular latency difference at P100 (signed L − R, ms). + + Returns ``None`` if either eye is missing a P100 detection. Otherwise a + dict with per-eye latency/amplitude, the signed difference, and a + threshold-crossing flag suitable for clinical screening. 
+ """ + if p100_left is None or p100_right is None: + return None + p100_left_ms = p100_left['latency'] * 1000.0 + p100_right_ms = p100_right['latency'] * 1000.0 + iold_ms = p100_left_ms - p100_right_ms + return { + 'p100_left_ms': json_safe_float(p100_left_ms), + 'p100_right_ms': json_safe_float(p100_right_ms), + 'p100_left_uv': json_safe_float(p100_left['amplitude'] * 1e6), + 'p100_right_uv': json_safe_float(p100_right['amplitude'] * 1e6), + 'iold_ms': json_safe_float(iold_ms), + 'flag': bool(abs(iold_ms) > flag_threshold_ms), + } + + +def compute_iold_per_size(epochs, event_id, ch_name, + sizes=('large', 'small'), + eye_prefixes=('left_eye', 'right_eye'), + flag_threshold_ms=IOLD_FLAG_MS): + """Per-check-size IOLD. + + Demyelination preferentially delays the high-spatial-frequency (small- + check) response, so the per-size IOLD often surfaces lateralised + dysfunction that the size-pooled IOLD averages out. + + For each size in ``sizes``, trims-mean per-eye epochs at ``ch_name``, + locates P100, then runs :func:`compute_iold` on the pair. Returns + ``{size: iold_dict_or_None}``. + """ + out = {} + for size in sizes: + peaks = {} + for eye in eye_prefixes: + cond_key = f'{eye}/{size}' + if cond_key not in event_id or len(epochs[cond_key]) == 0: + peaks[eye] = None + continue + ev = trimmed_average(epochs[cond_key]).copy().pick([ch_name]) + _, p100, _ = get_pr_vep_latencies(ev) + peaks[eye] = p100 + out[size] = compute_iold(peaks.get('left_eye'), peaks.get('right_eye'), + flag_threshold_ms=flag_threshold_ms) + return out + + +def compute_amplitude_ratio(p100_left, p100_right, log2_flag=LOG2_AMP_FLAG): + """P100 amplitude ratio L/R (rectified) and log2 ratio. + + Returns ``None`` if either eye lacks a P100 or right-eye amplitude is zero + (ratio undefined). 
+ """ + if p100_left is None or p100_right is None: + return None + amp_l_uv = abs(p100_left['amplitude']) * 1e6 + amp_r_uv = abs(p100_right['amplitude']) * 1e6 + if amp_r_uv <= 0: + return None + ratio = amp_l_uv / amp_r_uv + log2_ratio = float(np.log2(ratio)) + return { + 'amp_left_uv': json_safe_float(amp_l_uv), + 'amp_right_uv': json_safe_float(amp_r_uv), + 'ratio': json_safe_float(ratio), + 'log2_ratio': json_safe_float(log2_ratio), + 'flag': bool(abs(log2_ratio) > log2_flag), + } + + +def compute_check_size_slope(epochs, event_id, ch_name, check_size_arcmin, + eye_prefixes=('left_eye', 'right_eye'), + min_trials=5): + """Per-eye P100 latency slope vs. check size (ms / arcmin). + + For each (eye × size) condition present in ``event_id`` (keys of the form + ``"{eye_prefix}/{size_label}"``), trims-mean across trials, locates the + P100 in the canonical search window, and fits a line of P100 latency vs. + check size in arcmin. The L − R slope difference amplifies asymmetric + spatial-frequency-dependent demyelination. + + Returns a dict with per-condition P100 latencies, per-eye slopes (or None + if fewer than two sizes were detected), and the L − R slope difference (or + None if either eye is missing a slope). 
+ """ + out = { + 'per_condition_p100_ms': {}, + 'slope_left_ms_per_arcmin': None, + 'slope_right_ms_per_arcmin': None, + 'slope_diff': None, + } + rows = [] + for eye in eye_prefixes: + for size_label, arcmin in check_size_arcmin.items(): + cond_key = f'{eye}/{size_label}' + if cond_key not in event_id: + continue + ep = epochs[cond_key] + if len(ep) < min_trials: + continue + ev = trimmed_average(ep) + _, p100_c, _ = get_pr_vep_latencies(ev.copy().pick([ch_name])) + if p100_c is None: + continue + lat_ms = p100_c['latency'] * 1000.0 + rows.append({'eye': eye, 'size_label': size_label, + 'size_arcmin': arcmin, 'p100_ms': lat_ms}) + out['per_condition_p100_ms'][cond_key] = json_safe_float(lat_ms) + + slopes = {} + for eye in eye_prefixes: + eye_rows = [r for r in rows if r['eye'] == eye] + if len(eye_rows) >= 2: + xs = np.array([r['size_arcmin'] for r in eye_rows]) + ys = np.array([r['p100_ms'] for r in eye_rows]) + slopes[eye] = float(np.polyfit(xs, ys, 1)[0]) + if 'left_eye' in slopes: + out['slope_left_ms_per_arcmin'] = json_safe_float(slopes['left_eye']) + if 'right_eye' in slopes: + out['slope_right_ms_per_arcmin'] = json_safe_float(slopes['right_eye']) + if 'left_eye' in slopes and 'right_eye' in slopes: + out['slope_diff'] = json_safe_float(slopes['left_eye'] - slopes['right_eye']) + return out + + +def bootstrap_p100_latency(epochs, event_id, ch_name, eye_prefix, + win_ms=(60, 160), n_boot=1000, seed=0, + proportiontocut=0.1, min_trials=10): + """Bootstrap distribution of P100 latency for one eye. + + Trial-resamples with replacement, recomputes the trimmed-mean evoked at + ``ch_name``, and locates the positive max in ``win_ms``. Returns a + length-``n_boot`` array of latencies (ms), or ``None`` if there aren't + enough trials. Pair two calls (one per eye) to derive an IOLD CI from the + pairwise differences. 
+ """ + keys = [k for k in event_id if k.startswith(eye_prefix)] + if not keys: + return None + ep = epochs[keys].copy().pick([ch_name]) + data = ep.get_data()[:, 0, :] + if data.shape[0] < min_trials: + return None + times_ms = ep.times * 1000.0 + win_mask = (times_ms >= win_ms[0]) & (times_ms <= win_ms[1]) + win_times = times_ms[win_mask] + n_trials = data.shape[0] + rng = np.random.default_rng(seed) + times_all = ep.times * 1000.0 # full time axis in ms + dt = float(times_all[1] - times_all[0]) # sample period (ms) + win_indices = np.where(win_mask)[0] # absolute indices of window samples + latencies = np.empty(n_boot) + for b in range(n_boot): + idx = rng.integers(0, n_trials, n_trials) + boot_avg = trim_mean(data[idx], proportiontocut=proportiontocut, axis=0) + + # Grid-level peak within the search window + local_peak = np.argmax(boot_avg[win_mask]) + abs_peak = win_indices[local_peak] + + # Parabolic interpolation: recovers sub-sample peak location, + # reducing CI quantisation from ±dt to roughly ±0.3 dt. + if 0 < abs_peak < len(boot_avg) - 1: + y_l = boot_avg[abs_peak - 1] + y_c = boot_avg[abs_peak] + y_r = boot_avg[abs_peak + 1] + denom = y_l - 2 * y_c + y_r + if abs(denom) > 1e-30: + offset = 0.5 * (y_l - y_r) / denom # fractional sample offset + latencies[b] = times_all[abs_peak] + offset * dt + else: + latencies[b] = times_all[abs_peak] + else: + latencies[b] = times_all[abs_peak] + + return latencies + + +def compute_hemi_asymmetry(evoked_avg, ch_left, ch_right, + lat_flag_ms=IOLD_FLAG_MS, log2_flag=LOG2_AMP_FLAG): + """P100 latency/amplitude asymmetry between two hemispheric channels. + + Works for any homologous pair (O1/O2, P7/P8, ...). Returns ``None`` if + either channel fails P100 detection. Sign convention: ``lat_diff = + ch_left − ch_right``, so positive means the left-named channel is later. 
+ """ + ev_l = evoked_avg.copy().pick([ch_left]) + ev_r = evoked_avg.copy().pick([ch_right]) + _, p100_l, _ = get_pr_vep_latencies(ev_l) + _, p100_r, _ = get_pr_vep_latencies(ev_r) + if p100_l is None or p100_r is None: + return None + lat_l = p100_l['latency'] * 1000.0 + lat_r = p100_r['latency'] * 1000.0 + amp_l = abs(p100_l['amplitude']) * 1e6 + amp_r = abs(p100_r['amplitude']) * 1e6 + lat_diff = lat_l - lat_r + amp_ratio = amp_l / amp_r if amp_r > 0 else float('inf') + log2_ratio = float(np.log2(amp_ratio)) if amp_r > 0 else float('nan') + return { + f'lat_{ch_left.lower()}': json_safe_float(lat_l), + f'lat_{ch_right.lower()}': json_safe_float(lat_r), + f'amp_{ch_left.lower()}': json_safe_float(amp_l), + f'amp_{ch_right.lower()}': json_safe_float(amp_r), + 'lat_diff_ms': json_safe_float(lat_diff), + 'amp_ratio': json_safe_float(amp_ratio), + 'log2_ratio': json_safe_float(log2_ratio), + 'lat_flag': bool(abs(lat_diff) > lat_flag_ms), + 'amp_flag': bool(amp_r > 0 and abs(log2_ratio) > log2_flag), + } + + +def compute_hemi_delta_asymmetry(left_eye_hemi, right_eye_hemi, + ch_left, ch_right): + """Inter-ocular contrast of hemispheric asymmetry. 
+ + Δlat = (chL − chR)|left_eye − (chL − chR)|right_eye + Δlog2 = log2(chL/chR)|left_eye − log2(chL/chR)|right_eye + + A purely anatomical chL/chR offset is shared by both eyes and cancels in + these inter-ocular differences, isolating eye-specific pathology. + Returns None when either per-eye asymmetry input is missing. + """ + if left_eye_hemi is None or right_eye_hemi is None: + return None + L, R = left_eye_hemi, right_eye_hemi + amp_key_l = f'amp_{ch_left.lower()}' + amp_key_r = f'amp_{ch_right.lower()}' + lat_asym_L = L['lat_diff_ms'] + lat_asym_R = R['lat_diff_ms'] + d_lat = lat_asym_L - lat_asym_R + log2_asym_L = (np.log2(L[amp_key_l] / L[amp_key_r]) + if L[amp_key_r] > 0 else float('nan')) + log2_asym_R = (np.log2(R[amp_key_l] / R[amp_key_r]) + if R[amp_key_r] > 0 else float('nan')) + d_log2 = log2_asym_L - log2_asym_R + return { + 'lat_asym_left': json_safe_float(lat_asym_L), + 'lat_asym_right': json_safe_float(lat_asym_R), + 'd_lat': json_safe_float(d_lat), + 'log2_asym_left': json_safe_float(log2_asym_L), + 'log2_asym_right': json_safe_float(log2_asym_R), + 'd_log2': json_safe_float(d_log2), + } diff --git a/eegnb/cli/introprompt.py b/eegnb/cli/introprompt.py index ea96b3685..9e16162a0 100644 --- a/eegnb/cli/introprompt.py +++ b/eegnb/cli/introprompt.py @@ -4,7 +4,7 @@ from eegnb import generate_save_fn, DATA_DIR from eegnb.devices.eeg import EEG -from .utils import run_experiment, get_exp_desc, experiments +from .utils import run_experiment, get_exp_desc, get_experiments eegnb_sites = ['eegnb_examples', 'grifflab_dev', 'jadinlab_home'] @@ -87,6 +87,7 @@ def device_prompt() -> EEG: def exp_prompt(runorzip:str='run') -> str: + experiments = get_experiments() print("\nPlease select which experiment you would like to %s: \n" %runorzip) print( "\n".join( diff --git a/eegnb/cli/utils.py b/eegnb/cli/utils.py index fad282cf0..b7191bf29 100644 --- a/eegnb/cli/utils.py +++ b/eegnb/cli/utils.py @@ -1,38 +1,42 @@ -#change the pref libraty to PTB and set the latency mode to high precision -from psychopy import prefs -prefs.hardware['audioLib'] = 'PTB' -prefs.hardware['audioLatencyMode'] = 3 +try: + #change the pref libraty to PTB and set the latency mode to high precision + from psychopy import prefs + prefs.hardware['audioLib'] = 'PTB' + prefs.hardware['audioLatencyMode'] = 3 +except ImportError: + pass from eegnb.devices.eeg import EEG - -from eegnb.experiments import VisualN170, Experiment -from eegnb.experiments import VisualP300 -from eegnb.experiments import VisualSSVEP -from 
eegnb.experiments import AuditoryOddball -from eegnb.experiments.visual_cueing import cueing -from eegnb.experiments.visual_codeprose import codeprose -from eegnb.experiments.auditory_oddball import diaconescu -from eegnb.experiments.auditory_ssaep import ssaep, ssaep_onefreq from typing import Optional +def get_experiments(): + from eegnb.experiments import VisualN170, Experiment + from eegnb.experiments import VisualP300 + from eegnb.experiments import VisualSSVEP + from eegnb.experiments import AuditoryOddball + from eegnb.experiments.visual_cueing import cueing + from eegnb.experiments.visual_codeprose import codeprose + from eegnb.experiments.auditory_oddball import diaconescu + from eegnb.experiments.auditory_ssaep import ssaep, ssaep_onefreq -# New Experiment Class structure has a different initilization, to be noted -experiments = { - "visual-N170": VisualN170(), - "visual-P300": VisualP300(), - "visual-SSVEP": VisualSSVEP(), - "visual-cue": cueing, - "visual-codeprose": codeprose, - "auditory-SSAEP orig": ssaep, - "auditory-SSAEP onefreq": ssaep_onefreq, - "auditory-oddball orig": AuditoryOddball(), - "auditory-oddball diaconescu": diaconescu, -} + # New Experiment Class structure has a different initilization, to be noted + return { + "visual-N170": VisualN170, + "visual-P300": VisualP300, + "visual-SSVEP": VisualSSVEP, + "visual-cue": cueing, + "visual-codeprose": codeprose, + "auditory-SSAEP orig": ssaep, + "auditory-SSAEP onefreq": ssaep_onefreq, + "auditory-oddball orig": AuditoryOddball, + "auditory-oddball diaconescu": diaconescu, + } def get_exp_desc(exp: str): + experiments = get_experiments() if exp in experiments: module = experiments[exp] if hasattr(module, "__title__"): @@ -43,17 +47,24 @@ def get_exp_desc(exp: str): def run_experiment( experiment: str, eeg_device: EEG, record_duration: Optional[float] = None, save_fn=None ): + experiments = get_experiments() if experiment in experiments: - module = experiments[experiment] + exp_item = 
experiments[experiment] + + from eegnb.experiments import Experiment # Condition added for different run types of old and new experiment class structure - if isinstance(module, Experiment.BaseExperiment): + # If it's a class (BaseExperiment subclass), instantiate it + if isinstance(exp_item, type) and issubclass(exp_item, Experiment.BaseExperiment): + # Concrete subclasses supply defaults for BaseExperiment's required args; mypy can't see which subclass. + module = exp_item() # type: ignore[call-arg] module.duration = record_duration module.eeg = eeg_device module.save_fn = save_fn module.run() else: - module.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) # type: ignore + # Otherwise it's an old-style module + exp_item.present(duration=record_duration, eeg=eeg_device, save_fn=save_fn) # type: ignore else: print("\nError: Unknown experiment '{}'".format(experiment)) print("\nExperiment can be one of:") diff --git a/eegnb/datasets/datasets.py b/eegnb/datasets/datasets.py index 21add031c..61a7fa429 100644 --- a/eegnb/datasets/datasets.py +++ b/eegnb/datasets/datasets.py @@ -54,10 +54,12 @@ def fetch_dataset( "visual-P300", "visual-spatialfreq", "visual-SSVEP", + "visual-PRVEP", ] # List gdrive extensions for various experiments gdrive_locs = { + "visual-PRVEP": "1qn-0OSxlO6-stL6EXh9VT4pMSFghCXBG", "visual-SSVEP": "1zj9Wx-YEMJo7GugUUu7Sshcybfsr-Fze", "visual-spatialfreq": "1ggBt7CNvMgddxji-FvxcZoP-IF-PmESX", "visual-P300": "1OLcj-zSjqdNrsBSUAsGBXOwWDnGWTVFC", @@ -84,6 +86,12 @@ def fetch_dataset( download_it = True if download_it: + if gdrive_locs.get(experiment) is None: + raise ValueError( + f"No example dataset available for '{experiment}' yet. " + "Upload a zip to Google Drive and add the file ID to gdrive_locs in datasets.py." + ) + # check if data directory exits. 
If not, create it if os.path.exists(data_dir) is not True: os.makedirs(data_dir) diff --git a/eegnb/devices/__init__.py b/eegnb/devices/__init__.py index e69de29bb..a95062678 100644 --- a/eegnb/devices/__init__.py +++ b/eegnb/devices/__init__.py @@ -0,0 +1,9 @@ +from eegnb.devices.utils import ( # noqa: F401 + CYTON_CONFIG_GAIN_1X, + CYTON_CONFIG_GAIN_2X, + CYTON_CONFIG_GAIN_4X, + CYTON_CONFIG_GAIN_6X, + CYTON_CONFIG_GAIN_8X, + CYTON_CONFIG_GAIN_12X, + CYTON_CONFIG_GAIN_24X, +) diff --git a/eegnb/devices/eeg.py b/eegnb/devices/eeg.py index ba85f2e25..331f3e363 100644 --- a/eegnb/devices/eeg.py +++ b/eegnb/devices/eeg.py @@ -20,11 +20,15 @@ from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE -import pyxid2 +try: + import pyxid2 +except Exception: + pyxid2 = None from eegnb.devices.utils import ( get_openbci_usb, create_stim_array, + assert_ftdi_latency_1ms, SAMPLE_FREQS, EEG_INDICES, EEG_CHANNELS, @@ -323,6 +327,8 @@ def _init_brainflow(self): if self.serial_port: serial_port = str(self.serial_port) self.brainflow_params.serial_port = serial_port + if self.device_name in ('cyton', 'cyton_daisy'): + assert_ftdi_latency_1ms(serial_port) # Initialize board_shim self.sfreq = BoardShim.get_sampling_rate(self.brainflow_id) @@ -333,11 +339,14 @@ def _init_brainflow(self): if self.config: # For Cyton boards, split config string by 'X' delimiter and apply each setting if 'cyton' in self.device_name: - config_settings = self.config.split('X') + config_settings = [s for s in self.config.split('X') if s] for setting in config_settings: - self.board.config_board(setting + 'X') + cmd = setting + 'X' + response = self.board.config_board(cmd) + print(f"[cyton config] {cmd} -> {response!r}") else: - self.board.config_board(self.config) + response = self.board.config_board(self.config) + print(f"[config_board] {self.config!r} -> {response!r}") def _start_brainflow(self): # only start stream if non exists @@ -412,6 +421,17 @@ def _brainflow_extract(self, data): eeg_data = 
data[:, BoardShim.get_eeg_channels(self.brainflow_id)] timestamps = data[:, BoardShim.get_timestamp_channel(self.brainflow_id)] + # BrainFlow scales Cyton data assuming 24× gain. If a different gain was + # configured via config_board, correct the scaling here. + # Config string format: x{ch}0{gain_code}0110X (gain_code: 0=1×,1=2×,2=4×,3=6×,4=8×,5=12×,6=24×) + if self.config and 'cyton' in self.device_name: + gain_multipliers = {0: 1, 1: 2, 2: 4, 3: 6, 4: 8, 5: 12, 6: 24} + brainflow_assumed_gain = 24 + gain_code = int(self.config[3]) # 4th char of first command "x{ch}0{G}..." + actual_gain = gain_multipliers.get(gain_code, brainflow_assumed_gain) + if actual_gain != brainflow_assumed_gain: + eeg_data = eeg_data * (brainflow_assumed_gain / actual_gain) + return ch_names, eeg_data, timestamps def _brainflow_push_sample(self, marker): @@ -670,14 +690,21 @@ def start(self, fn, duration=None): - def push_sample(self, marker, timestamp, marker_name=None): + def push_sample(self, marker, timestamp=None, marker_name=None): """ Universal method for pushing a marker and its timestamp to store alongside the EEG data. Parameters: marker (int): marker number for the stimuli being presented. - timestamp (float): timestamp of stimulus onset from time.time() function. + timestamp (float, optional): timestamp of stimulus onset from time.time() function. + Not used by the BrainFlow backend (which records the board's current sample + timestamp instead). Required by muselsl and kernelflow backends. + + Returns: + bool: True if the marker was recorded, False if the stream is no longer active. 
""" + if not self.stream_started: + return False if self.backend == "brainflow": self._brainflow_push_sample(marker=marker) elif self.backend == "muselsl": @@ -688,8 +715,10 @@ def push_sample(self, marker, timestamp, marker_name=None): self._serial_push_sample(marker=marker) elif self.backend == "xidport": self._xid_push_sample(marker=marker) + return True def stop(self): + self.stream_started = False if self.backend == "brainflow": self._stop_brainflow() elif self.backend == "muselsl": diff --git a/eegnb/devices/utils.py b/eegnb/devices/utils.py index c804befd0..9b60d95b3 100644 --- a/eegnb/devices/utils.py +++ b/eegnb/devices/utils.py @@ -1,5 +1,6 @@ import numpy as np import socket +import sys import platform import serial @@ -90,6 +91,44 @@ } + +# --------------------------------------------------------------------------- +# Cyton board channel configuration presets +# --------------------------------------------------------------------------- +# Each channel command has the format: x N P G I B S1 S2 X +# N = channel number (1-8) +# P = power (0=ON, 1=OFF) +# G = gain (0=1×, 1=2×, 2=4×, 3=6×, 4=8×, 5=12×, 6=24×) +# I = input type (0=normal EEG, 1=shorted, ...) +# B = include in BIAS derivation (1=yes) +# S2 = SRB2 connection (1=connected) +# S1 = SRB1 connection (0=disconnected) +# +# Build a config string by joining per-channel strings — applied with +# EEG(device='cyton', config=CYTON_CONFIG_GAIN_4X). + +def _cyton_ch_config(gain_code: int, n_channels: int = 8) -> str: + """Build a Cyton channel-settings string for all channels. + + Args: + gain_code: ADS1299 gain code (0=1×, 1=2×, 2=4×, 3=6×, 4=8×, 5=12×, 6=24×). + n_channels: Number of channels to configure (default 8 for standard Cyton). + + Returns: + Config string ready to pass to ``EEG(config=...)``. + """ + return "".join(f"x{ch}0{gain_code}0110X" for ch in range(1, n_channels + 1)) + +# Standard gain presets — normal EEG input, bias enabled, SRB2 on, SRB1 off. 
# Standard gain presets — normal EEG input, bias enabled, SRB2 on, SRB1 off.
# Each preset concatenates eight "xNPGIBS1S2X" channel commands in which only
# the gain digit differs (see the channel-command format documented above).
CYTON_CONFIG_GAIN_1X = "".join(f"x{ch}000110X" for ch in range(1, 9))   # 1× (for strong signals / testing)
CYTON_CONFIG_GAIN_2X = "".join(f"x{ch}010110X" for ch in range(1, 9))   # 2×
CYTON_CONFIG_GAIN_4X = "".join(f"x{ch}020110X" for ch in range(1, 9))   # 4× - for Thinkpulse electrodes
CYTON_CONFIG_GAIN_6X = "".join(f"x{ch}030110X" for ch in range(1, 9))   # 6×
CYTON_CONFIG_GAIN_8X = "".join(f"x{ch}040110X" for ch in range(1, 9))   # 8×
CYTON_CONFIG_GAIN_12X = "".join(f"x{ch}050110X" for ch in range(1, 9))  # 12× — good general-purpose EEG config
CYTON_CONFIG_GAIN_24X = "".join(f"x{ch}060110X" for ch in range(1, 9))  # 24× — default gain


def serial_ports():
    """Return the list of serial ports currently visible to pyserial."""
    return serial.tools.list_ports.comports()


def get_ftdi_latency_ms(com_port: str):
    """Read the FTDI VCP LatencyTimer (ms) for a COM port from the Windows registry.

    Walks ``HKLM\\SYSTEM\\CurrentControlSet\\Enum\\FTDIBUS`` looking for the
    device instance whose ``PortName`` matches *com_port* and returns its
    ``LatencyTimer`` value.

    Returns:
        Latency in ms as an int, or None on non-Windows platforms or when the
        port is not found under the FTDIBUS registry tree.
    """
    if sys.platform != 'win32':
        return None
    import winreg
    ftdi_root = r"SYSTEM\CurrentControlSet\Enum\FTDIBUS"
    wanted = com_port.upper()
    try:
        root_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, ftdi_root)
    except OSError:
        return None
    try:
        for device_idx in range(1024):
            try:
                device_name = winreg.EnumKey(root_key, device_idx)
            except OSError:
                break  # no more FTDI devices enumerated
            device_path = f"{ftdi_root}\\{device_name}"
            try:
                device_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, device_path)
            except OSError:
                continue
            try:
                latency = _scan_ftdi_instances(winreg, device_path, device_key, wanted)
            finally:
                device_key.Close()
            if latency is not None:
                return latency
    finally:
        root_key.Close()
    return None


def _scan_ftdi_instances(winreg, device_path, device_key, wanted):
    """Walk one FTDI device's instances; return LatencyTimer for the matching port.

    Helper for ``get_ftdi_latency_ms``. Returns None when no instance's
    PortName matches *wanted* (or its values cannot be read).
    """
    for instance_idx in range(64):
        try:
            instance = winreg.EnumKey(device_key, instance_idx)
        except OSError:
            return None
        params_path = f"{device_path}\\{instance}\\Device Parameters"
        try:
            params_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, params_path)
        except OSError:
            continue
        try:
            try:
                port_name, _ = winreg.QueryValueEx(params_key, "PortName")
                if str(port_name).upper() == wanted:
                    latency, _ = winreg.QueryValueEx(params_key, "LatencyTimer")
                    return int(latency)
            except OSError:
                # Missing PortName/LatencyTimer on this instance; keep looking.
                pass
        finally:
            params_key.Close()
    return None


def assert_ftdi_latency_1ms(com_port: str) -> None:
    """Assert the FTDI LatencyTimer for a COM port is 1 ms (Windows only).

    The OpenBCI Cyton dongle ships with the Windows default of 16 ms, which
    adds ~15 ms of USB buffering jitter to every marker push and corrupts
    stimulus/EEG alignment for VEP-class experiments. No-op on non-Windows.

    Fix in: Device Manager -> Ports -> USB Serial Port -> Properties ->
    Port Settings -> Advanced -> Latency Timer (ms) = 1.
    """
    if sys.platform != 'win32':
        return
    latency = get_ftdi_latency_ms(com_port)
    assert latency is not None, (
        f"Could not read FTDI LatencyTimer for {com_port}. "
        f"Verify it is an FTDI device in Device Manager."
    )
    assert latency == 1, (
        f"FTDI LatencyTimer for {com_port} is {latency} ms; required 1 ms. "
        f"Device Manager -> Ports -> USB Serial Port ({com_port}) -> "
        f"Properties -> Port Settings -> Advanced -> Latency Timer (ms) = 1."
    )


# Frame-rate deviation above this percentage triggers a user-facing warning.
# Set high (50%) because VR jitter is recoverable per-trial via the timing
# sidecar (app_motion_to_photon_latency_s). The warning catches fundamentally
# broken setups (wrong GPU, no acceleration), not borderline cases.
DISPLAY_DEVIATION_FLAG_PCT = 50.0


def _build_placeholder_monitor():
    """Build the programmatic Monitor handed to the Rift constructor.

    Display geometry comes from the HMD runtime, not this object. The console
    logger is silenced during construction because ``Monitor.__init__`` emits
    an unconditional 'Monitor specification not found' warning for any name
    not present in the user's saved calibration database.
    """
    from psychopy import logging as psy_logging
    saved_level = psy_logging.console.level
    psy_logging.console.setLevel(psy_logging.ERROR)
    try:
        monitor = monitors.Monitor('eegnb_vr_placeholder', autoLog=False)
        monitor.setDistance(60)
        monitor.setSizePix([1920, 1080])
    finally:
        psy_logging.console.setLevel(saved_level)
    return monitor
The values are not + # used by the Rift compositor — display geometry comes from the HMD + # runtime via libovr — but PsychoPy's Window base class still queries + # the monitor object during initialization. + kwargs.setdefault('monitor', _build_placeholder_monitor()) + super().__init__(*args, **kwargs) + self.libovr_to_wallclock_offset = None + + def validate_frame_rate(self, draw_blank, n_frames=60, warmup=10): + """Measure actual frame delivery rate and compare to the runtime target. + + Specific to VR because Quest Link's encoded transport pipeline can + silently degrade (wrong GPU, encode bottleneck) even though the runtime + still advertises its nominal refresh rate. + + Args: + draw_blank: callable that draws a frame and flips the window. + Caller decides eye-buffer logic. + n_frames: measurement window. 60 frames ≈ 0.5s at 120 Hz. + warmup: discarded frames so the compositor reaches steady state. + + Returns: + Dict with ``target_hz``, ``actual_hz``, ``deviation_pct``, ``ok`` + (True iff deviation ≤ ``DISPLAY_DEVIATION_FLAG_PCT``). + """ + target_hz = float(self.displayRefreshRate) + + for _ in range(warmup): + draw_blank() + + t0 = core.getTime() + for _ in range(n_frames): + draw_blank() + elapsed = core.getTime() - t0 + + actual_hz = n_frames / elapsed + deviation = abs(actual_hz - target_hz) / target_hz * 100.0 + + result = { + 'target_hz': round(target_hz, 1), + 'actual_hz': round(actual_hz, 1), + 'deviation_pct': round(deviation, 1), + 'ok': deviation <= DISPLAY_DEVIATION_FLAG_PCT, + } + + status = 'OK' if result['ok'] else 'DEGRADED — check GPU assignment' + print(f"[vr] Target: {target_hz:.0f} Hz Measured: {actual_hz:.1f} Hz " + f"Deviation: {deviation:.1f}% [{status}]") + return result + + def compute_optical_axis_offsets(self): + """ + NDC x offsets placing content on each lens's optical axis: + ndc_x = (LeftTan - RightTan) / (LeftTan + RightTan). 
+ """ + try: + import psychxr.drivers.libovr as libovr + # fov = [UpTan, DownTan, LeftTan, RightTan] + left_fov, _, _ = libovr.getEyeRenderFov(libovr.EYE_LEFT) + right_fov, _, _ = libovr.getEyeRenderFov(libovr.EYE_RIGHT) + left_L, left_R = float(left_fov[2]), float(left_fov[3]) + right_L, right_R = float(right_fov[2]), float(right_fov[3]) + return ((left_L - left_R) / (left_L + left_R), + (right_L - right_R) / (right_L + right_R)) + except Exception as e: + logging.warning(f"[VR] Failed to compute optical axis offsets: {e}") + return (0.0, 0.0) + + def sync_vr_clock(self): + """ + Calculates Wall-clock <-> LibOVR clock offset. LibOVR timestamps are on a QPC-based + clock with arbitrary zero; time.time() is Unix epoch. Sample paired calls in a + tight bracket and keep the tightest, so analysis can convert LibOVR times to wall-clock. + """ + if self.libovr_to_wallclock_offset is not None: + return self.libovr_to_wallclock_offset + + try: + from psychxr.drivers.libovr import timeInSeconds + best_bracket = None + best_offset = None + for _ in range(21): + t0 = time() + lovr = timeInSeconds() + t1 = time() + bracket = t1 - t0 + offset = 0.5 * (t0 + t1) - lovr + if best_bracket is None or bracket < best_bracket: + best_bracket = bracket + best_offset = offset + + logging.info( + f"[VR] clock offset (wall - libovr) = " + f"{best_offset:.6f}s (tightest bracket = {best_bracket*1e3:.3f}ms)" + ) + + self.libovr_to_wallclock_offset = best_offset + return best_offset + except Exception as e: + logging.warning(f"[VR] LibOVR clock sync failed: {e}") + return None + + def log_display_info(self): + """ + Reads IPD, PPD, and display resolution from the LibOVR session and logs + them to the telemetry sidecar as header comment rows. + Returns (ppd, ipd_mm) for use in stimulus sizing. 
+ """ + try: + ppta = self.pixelsPerTanAngleAtCenter + ppd_h = np.mean([p[0] for p in ppta]) * (np.pi / 180.0) + ppd_v = np.mean([p[1] for p in ppta]) * (np.pi / 180.0) + ppd = int(round(min(ppd_h, ppd_v))) + + eye_to_nose = self.eyeToNoseDistance + ipd_mm = (eye_to_nose[0] + eye_to_nose[1]) * 1000.0 + + logging.info( + f"[VR] IPD={ipd_mm:.1f}mm ppd={ppd} (h={ppd_h:.1f} v={ppd_v:.1f}) " + f"res={self.displayResolution} eye_buf={self.size}" + ) + + if not hasattr(self, 'timing_data'): + self.timing_data = [] + self.timing_data.insert(0, ['# ipd_mm', ipd_mm, 'ppd', ppd, f'ppd_h={ppd_h:.1f} ppd_v={ppd_v:.1f}']) + + return ppd, ipd_mm + except Exception as e: + logging.warning(f"[VR] Failed to read display info: {e}") + return None, None + + def log_telemetry(self, trial_idx, software_time): + """Extracts native LibOVR performance stats and buffers them in memory.""" + submitted_frame_index = None + app_frame_index = None + app_m2p_s = None + comp_latency_s = None + time_to_vsync_s = None + + try: + submitted_frame_index = self._frameIndex - 1 + perf = getattr(self, '_perfStats', None) + if perf is not None and perf.frameStatsCount > 0: + stat = perf.frameStats[0] + app_frame_index = stat.appFrameIndex + app_m2p_s = stat.appMotionToPhotonLatency + comp_latency_s = stat.compositorLatency + time_to_vsync_s = stat.timeToVsync + except Exception: + pass + + if not hasattr(self, 'timing_data'): + self.timing_data = [] + + self.timing_data.append([ + trial_idx, software_time, + submitted_frame_index, app_frame_index, + app_m2p_s, comp_latency_s, time_to_vsync_s + ]) + + def save_telemetry(self, save_fn): + """Saves memory-buffered VR timing telemetry to a CSV sidecar.""" + timing_data = getattr(self, 'timing_data', []) + if not timing_data: + return + + import csv + timing_path = save_fn.with_name(save_fn.stem + '_timing.csv') if save_fn else 'vr_timing.csv' + with open(timing_path, 'w', newline='') as f: + writer = csv.writer(f) + writer.writerow([ + 'trial_idx', 
'software_time', + 'submitted_frame_index', 'app_frame_index', + 'app_motion_to_photon_latency_s', 'compositor_latency_s', + 'time_to_vsync_s' + ]) + + # Log the clock offset if calculated + if self.libovr_to_wallclock_offset is not None: + writer.writerow(['# libovr_to_wallclock_offset_s', self.libovr_to_wallclock_offset, 'bracket_ms', 0]) + + writer.writerows(timing_data) + print(f" Saved VR timing telemetry to {timing_path}") diff --git a/eegnb/experiments/BlockExperiment.py b/eegnb/experiments/BlockExperiment.py new file mode 100644 index 000000000..a728a21a5 --- /dev/null +++ b/eegnb/experiments/BlockExperiment.py @@ -0,0 +1,151 @@ +""" +BlockExperiment Class - Extends BaseExperiment with block-based functionality + +This class provides block-based experiment capabilities by inheriting from BaseExperiment +and overriding the run method to handle multiple blocks. It loads stimulus only once +and reuses it across blocks, while allowing block-specific instructions. + +Experiments that need block-based execution should inherit from this class instead of BaseExperiment. +""" +import gc +from abc import ABC +from time import time + +from .Experiment import BaseExperiment +from psychopy import core + + +class BlockExperiment(BaseExperiment, ABC): + """ + Inherits from BaseExperiment to provide block-based functionality. + + This class is designed for experiments that need to run as multiple blocks. + Each block has its own instructions and duration. It loads all stimuli at once, then re/uses it across blocks. 
+ """ + + def __init__(self, exp_name, block_duration, eeg, save_fn, block_trial_size, n_blocks, iti: float, soa: float, jitter: float, + use_vr=False, use_fullscr=True, stereoscopic=False): + """ Initializer for the BlockExperiment Class + + Args: + exp_name (str): Name of the experiment + block_duration (float): Duration of each block in seconds + eeg: EEG device object for recording + save_fn (str): Save filename for data + block_trial_size (int): Number of trials per block + n_blocks (int): Number of blocks to run + iti (float): Inter-trial interval + soa (float): Stimulus on arrival + jitter (float): Random delay between stimulus + use_vr (bool): Use VR for displaying stimulus + use_fullscr (bool): Use fullscreen mode + """ + # Calculate total trials for the base class + total_trials = block_trial_size * n_blocks + + # Initialize BaseExperiment with total trials + # Pass None for duration if block_duration is None to ignore time spent in instructions + super().__init__(exp_name, block_duration, eeg, save_fn, total_trials, iti, soa, jitter, use_vr, use_fullscr, stereoscopic=stereoscopic) + + # Block-specific parameters + self.block_duration = block_duration + self.block_trial_size = block_trial_size + self.n_blocks = n_blocks + + # Current block index + self.current_block_index = 0 + + def present_block_instructions(self, current_block): + """ + Display instructions for the current block to the user. + + This method is meant to be overridden by child classes to provide + experiment-specific instructions before each block. The base implementation + simply flips the window without adding any text. + + This method is called by __show_block_instructions in a loop until the user + provides input to continue or cancel the experiment. + + Args: + current_block (int): The current block number (0-indexed), used to customize + instructions for specific blocks if needed. 
+ """ + self.window.flip() + + def _show_block_instructions(self, block_number): + """ + Show instructions for a specific block + + Args: + block_number (int): Current block number (0-indexed) + + Returns: + tuple: (continue_experiment, instruction_end_time) + - continue_experiment (bool): Whether to continue the experiment + """ + + # Clear any previous input + self._clear_user_input() + + # Wait for user input to continue + while True: + # Display the instruction text + super()._draw(lambda: self.present_block_instructions(block_number)) + + if self._user_input('start'): + return True + elif self._user_input('cancel'): + return False + + def run(self, instructions=True): + """ + Run the experiment as a series of blocks + + This method overrides BaseExperiment.run() to handle multiple blocks. + + Args: + instructions (bool): Whether to show the initial experiment instructions + """ + # Setup the experiment (creates window, loads stimulus once) + if not self.setup(instructions): + return False + + # Start EEG Stream once for all blocks + if self.eeg: + print("Wait for the EEG-stream to start...") + self.eeg.start(self.save_fn) + print("EEG Stream started") + + self._enable_frame_tracking() + + # Run each block + for block_index in range(self.n_blocks): + self.current_block_index = block_index + print(f"Starting block {block_index + 1} of {self.n_blocks}") + + # Show block-specific instructions + if not self._show_block_instructions(block_index): + break + + core.rush(True) + gc.disable() + try: + if self.use_vr: + self.vr.sync_vr_clock() + if not self._run_trial_loop(start_time=time(), duration=self.block_duration): + break + finally: + gc.enable() + core.rush(False) + + self._report_frame_stats() + + # Stop EEG Stream after all blocks + if self.eeg: + self.eeg.stop() + + if self.use_vr: + self.vr.save_telemetry(self.save_fn) + + # Close window at the end of all blocks + self.window.close() diff --git a/eegnb/experiments/Experiment.py 
b/eegnb/experiments/Experiment.py index 4d7cab119..6db3de8e5 100644 --- a/eegnb/experiments/Experiment.py +++ b/eegnb/experiments/Experiment.py @@ -11,17 +11,19 @@ from abc import abstractmethod, ABC from typing import Callable from eegnb.devices.eeg import EEG -from psychopy import prefs -from psychopy.visual.rift import Rift +from eegnb.devices.vr import VR +from psychopy import prefs, visual, event, core +import gc from time import time import random +import json import numpy as np from pandas import DataFrame -from psychopy import visual, event from eegnb import generate_save_fn +from eegnb.experiments import diagnostics class BaseExperiment(ABC): @@ -61,11 +63,14 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.stereoscopic = stereoscopic if use_vr: # VR interface accessible by specific experiment classes for customizing and using controllers. - self.rift: Rift = visual.Rift(monoscopic=not stereoscopic, headLocked=True) - # eye for presentation - if stereoscopic: - self.left_eye_x_pos = 0.2 - self.right_eye_x_pos = -0.2 + # VR extends psychopy's VR with clock sync, per-trial telemetry buffering, and telemetry CSV saving. + self.vr: VR = VR(monoscopic=not stereoscopic, headLocked=True) + + # Shift content onto each lens's optical axis. VR HMDs use canted + # asymmetric frustums, so NDC (0,0) is off-axis and binocular content + # there forces inward vergence ("cross-eyed" feel). 
+ if use_vr and stereoscopic: + self.left_eye_x_pos, self.right_eye_x_pos = self.vr.compute_optical_axis_offsets() else: self.left_eye_x_pos = 0 self.right_eye_x_pos = 0 @@ -73,6 +78,10 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.use_fullscr = use_fullscr self.window_size = [1600,800] + # Diagnostic results — populated by run()/setup(), read by show_diagnostics() + self.signal_check = None + self.display_check = None + # Initializing the marker names self.markernames = [1, 2] @@ -80,6 +89,7 @@ def __init__(self, exp_name, duration, eeg, save_fn, n_trials: int, iti: float, self.parameter = np.random.binomial(1, 0.5, self.n_trials) self.trials = DataFrame(dict(parameter=self.parameter, timestamp=np.zeros(self.n_trials))) + @abstractmethod def load_stimulus(self): """ @@ -113,18 +123,54 @@ def present_iti(self): """ self.window.flip() + def present_soa(self, idx: int): + """ + Method called each frame during the SOA wait (stimulus-on period between trial transitions). + + VR compositors require continuous frame submission (~120 Hz) or they emit + "failed to meet deadline" warnings and force half-rate reprojection. The default + implementation just flips; VR-capable subclasses should override to redraw the + stimulus for trial `idx` without side effects (no EEG markers, no timing rows). + + A bare window.flip() submits a stale/undefined frame and is not sufficient for + Quest / OpenXR compositors under load. + + idx : Trial index of the most recently presented stimulus — same value that was + passed to the preceding present_stimulus call. 
+ """ + self.window.flip() + + def _draw_blank_frame(self): + """Draw a blank frame and flip — used for display rate measurement.""" + self._draw(self.window.flip) + def setup(self, instructions=True): # Setting up Graphics - self.window = ( - self.rift if self.use_vr - else visual.Window(self.window_size, monitor="testMonitor", units="deg", - screen = self.screen_num, fullscr=self.use_fullscr)) - + if self.use_vr: + self.window = self.vr + self.display_check = self.vr.validate_frame_rate(self._draw_blank_frame) + else: + self.window = visual.Window(self.window_size, + monitor=diagnostics.build_flat_monitor(self.screen_num), + units="deg", + screen=self.screen_num, + fullscr=self.use_fullscr) + actual_hz = self.window.getActualFrameRate(nFrames=20) + self.display_check = { + 'target_hz': round(actual_hz, 1) if actual_hz else None, + 'actual_hz': round(actual_hz, 1) if actual_hz else None, + 'deviation_pct': 0.0, + 'ok': actual_hz is not None, + } + self.window.mouseVisible = False + # Loading the stimulus from the specific experiment, throws an error if not overwritten in the specific experiment self.stim = self.load_stimulus() - - # Show Instruction Screen if not skipped by the user + + # Show diagnostics screen first if anything's wrong, then instructions. 
if instructions: + if not self.show_diagnostics(): + return False if not self.show_instructions(): return False @@ -151,10 +197,8 @@ def show_instructions(self): """ # Splitting instruction text into lines - self.instruction_text = self.instruction_text % self.duration - - # Disabling the cursor during display of instructions - self.window.mouseVisible = False + if '%s' in self.instruction_text: + self.instruction_text = self.instruction_text % self.duration # clear/reset any old key/controller events self._clear_user_input() @@ -165,13 +209,97 @@ def show_instructions(self): text = visual.TextStim(win=self.window, text=self.instruction_text, color=[-1, -1, -1]) self._draw(lambda: self.__draw_instructions(text)) - # Enabling the cursor again - self.window.mouseVisible = True + if self._user_input('cancel'): + return False + return True + + def show_diagnostics(self): + """Display a pre-experiment diagnostics screen — only when flagged. + + Shows synthetic-device warning, degraded-framerate warning, and + noisey electrode warning. If none are flagged the screen is skipped + entirely so the experimenter goes straight to the instructions. + Returns True to continue, False if the user cancels. + """ + warnings = diagnostics.format_diagnostic_warnings( + device_name=self.eeg.device_name if self.eeg else None, + display=self.display_check, + signal_check=self.signal_check, + ) + if not warnings: + return True + + body = "\n\n".join(warnings) + body += "\n\nFix the issues above, or press spacebar / index trigger to continue anyway." 
+ + self._clear_user_input() + while not self._user_input('start'): + text = visual.TextStim( + win=self.window, text=body, + color=[1, 1, 1], font='Courier New', + height=0.04, wrapWidth=1.8, units='norm', + anchorHoriz='center', anchorVert='center', + ) + self._draw(lambda: self.__draw_instructions(text)) if self._user_input('cancel'): return False return True + def show_results(self, text): + """Display a results / summary screen after the experiment. + + Mirrors ``show_instructions``: loops the render loop until user input. Uses a + monospaced font so tabular output (e.g. recording quality reports) + aligns correctly. + + Args: + text (str): Text to display. Long lines are wrapped automatically. + """ + self._clear_user_input() + + stim = visual.TextStim( + win=self.window, text=text, + color=[1, 1, 1], # white on black background + font='Courier New', + height=0.04, + wrapWidth=1.8, + units='norm', + anchorHoriz='center', anchorVert='center', + ) + + while not self._user_input('start'): + self._draw(lambda: self.__draw_results(stim)) + if self._user_input('cancel'): + break + + def __draw_results(self, stim): + if self.use_vr and self.stereoscopic: + for eye, x_pos in [("left", self.left_eye_x_pos), + ("right", self.right_eye_x_pos)]: + self.window.setBuffer(eye) + stim.pos = (x_pos, 0) + stim.draw() + else: + stim.draw() + self.window.flip() + + def post_run(self): + """Called after the trial loop ends, before the window closes. + + Default: display a recording quality report so the experimenter can + decide whether to re-record before removing the electrodes. Override in a + subclass or reassign on the instance to replace this behaviour. + """ + if not self.save_fn: + return + try: + text = diagnostics.post_session_report(self.save_fn) + text += "\n\nPress spacebar or index trigger to close." 
+ self.show_results(text) + except Exception as e: + print(f"[post_run] Could not display quality report: {e}") + def _user_input(self, input_type): if input_type == 'start': key_input = 'spacebar' @@ -215,13 +343,13 @@ def get_vr_input(self, vr_controller, button=None, trigger=False): """ trigger_squeezed = False if trigger: - for x in self.rift.getIndexTriggerValues(vr_controller): + for x in self.vr.getIndexTriggerValues(vr_controller): if x > 0.0: trigger_squeezed = True button_pressed = False if button is not None: - button_pressed, tsec = self.rift.getButtons([button], vr_controller, 'released') + button_pressed, tsec = self.vr.getButtons([button], vr_controller, 'released') if trigger_squeezed or button_pressed: return True @@ -247,6 +375,7 @@ def _draw(self, present_stimulus: Callable): tracking_state = self.window.getTrackingState() self.window.calcEyePoses(tracking_state.headPose.thePose) self.window.setDefaultView() + present_stimulus() def _clear_user_input(self): @@ -258,7 +387,7 @@ def clear_vr_input(self): Clears/resets input events from vr controllers """ if self.use_vr: - self.rift.updateInputState() + self.vr.updateInputState() def _run_trial_loop(self, start_time, duration): """ @@ -272,6 +401,14 @@ def _run_trial_loop(self, start_time, duration): """ + if self.use_vr and type(self).present_soa is BaseExperiment.present_soa: + raise NotImplementedError( + f"{type(self).__name__} uses VR but does not override present_soa(idx). " + "psychxr does not honor setAutoDraw, and the VR compositor requires per-frame " + "redraws during the SOA wait; the default flip-only implementation will blank " + "the stimulus after one frame. Override present_soa(idx) to redraw your stimulus." 
+ ) + def iti_with_jitter(): return self.iti + np.random.rand() * self.jitter @@ -302,6 +439,10 @@ def iti_with_jitter(): # Stimulus presentation overwritten by specific experiment self._draw(lambda: self.present_stimulus(current_trial)) rendering_trial = current_trial + else: + # Keep submitting frames during SOA wait — VR compositor + # drops to half-rate if we stall between reversals. + self._draw(lambda: self.present_soa(current_trial)) else: self._draw(lambda: self.present_iti()) @@ -310,8 +451,49 @@ def iti_with_jitter(): return True + def _enable_frame_tracking(self): + """Enable per-frame interval recording for dropped frame diagnostics.""" + self.window.recordFrameIntervals = True + rate = self.window.displayRefreshRate if self.use_vr else self.window.getActualFrameRate() + self.display_refresh_rate = int(np.round(rate)) if rate else None + # Threshold for counting a frame as "dropped" — 50% over expected duration + expected_frame_dur = 1.0 / (rate or 60) + self.window.refreshThreshold = expected_frame_dur * 1.5 + + def _report_frame_stats(self): + """Print frame timing summary and save intervals alongside recording.""" + intervals = self.window.frameIntervals + if not intervals: + return + + intervals_ms = [i * 1000 for i in intervals] + dropped = self.window.nDroppedFrames + total = len(intervals) + mean_ms = np.mean(intervals_ms) + std_ms = np.std(intervals_ms) + max_ms = max(intervals_ms) + + print(f"\nFrame timing: {total} frames, {dropped} dropped ({dropped/total*100:.1f}%)") + print(f" Refresh rate: {self.display_refresh_rate} Hz") + print(f" Mean: {mean_ms:.2f}ms Std: {std_ms:.2f}ms Max: {max_ms:.2f}ms") + + if self.save_fn: + stats_path = self.save_fn.with_name(self.save_fn.stem + '_frame_stats.json') + with open(stats_path, 'w') as f: + json.dump({ + 'display_refresh_rate_hz': self.display_refresh_rate, + 'total_frames': total, + 'dropped_frames': dropped, + 'mean_ms': round(mean_ms, 3), + 'std_ms': round(std_ms, 3), + 'max_ms': round(max_ms, 3), 
+ 'intervals_ms': [round(i, 3) for i in intervals_ms] + }, f, indent=2) + print(f" Saved to {stats_path}") + def run(self, instructions=True): """ Run the experiment """ + self.signal_check = diagnostics.check_signal_quality(self.eeg) # Setup the experiment self.setup(instructions) @@ -323,11 +505,22 @@ def run(self, instructions=True): self.eeg.start(self.save_fn, duration=self.duration + 5) print("EEG Stream started") + self._enable_frame_tracking() + # Record experiment until a key is pressed or duration has expired. record_start_time = time() - # Run the trial loop - self._run_trial_loop(record_start_time, self.duration) + core.rush(True) + gc.disable() + try: + if self.use_vr: + self.vr.sync_vr_clock() + self._run_trial_loop(record_start_time, self.duration) + finally: + gc.enable() + core.rush(False) + + self._report_frame_stats() # Clearing the screen for the next trial event.clearEvents() @@ -336,20 +529,37 @@ def run(self, instructions=True): if self.eeg: self.eeg.stop() + if self.use_vr: + self.vr.save_telemetry(self.save_fn) + + # Post-run hook (e.g. show a summary / quality report screen) + self.post_run() + # Closing the window self.window.close() + def push_vr_marker(self, marker, trial_idx): + """ + Pushes the marker to EEG and delegates high-resolution LibOVR + compositor stats to the VR hardware object. 
+ """ + software_time = time() + + if not self.eeg.push_sample(marker=marker): + return + + if self.use_vr: + self.vr.log_telemetry(trial_idx, software_time) + def send_triggers(self, marker): """Send timing triggers to recording device[s]""" for dev in self.devices: timestamp = time() dev.push_sample(marker=marker, timestamp=timestamp) - @property def name(self) -> str: """ This experiment's name """ return self.exp_name - diff --git a/eegnb/experiments/__init__.py b/eegnb/experiments/__init__.py index 8532c0f76..2c1aaff44 100644 --- a/eegnb/experiments/__init__.py +++ b/eegnb/experiments/__init__.py @@ -1,27 +1,48 @@ -from .visual_n170.n170 import VisualN170 -from .visual_p300.p300 import VisualP300 -from .visual_ssvep.ssvep import VisualSSVEP +try: + from .visual_n170.n170 import VisualN170 + from .visual_p300.p300 import VisualP300 + from .visual_ssvep.ssvep import VisualSSVEP +except ImportError: + VisualN170 = None # type: ignore + VisualP300 = None # type: ignore + VisualSSVEP = None # type: ignore -from psychopy import sound, plugins, prefs -import platform +try: + from psychopy import sound, plugins, prefs + import platform + import logging -# PTB does not yet support macOS Apple Silicon freely, need to fall back to sounddevice. -if platform.system() == 'Darwin' and platform.machine() == 'arm64': - # import psychopy_sounddevice.backend_sounddevice - plugins.scanPlugins() - success = plugins.loadPlugin('psychopy-sounddevice') - print(f"psychopy_sounddevice plugin loaded: {success}") + # PTB does not yet support macOS Apple Silicon freely, need to fall back to sounddevice. 
+ if platform.system() == 'Darwin' and platform.machine() == 'arm64': + # import psychopy_sounddevice.backend_sounddevice + plugins.scanPlugins() + success = plugins.loadPlugin('psychopy-sounddevice') + print(f"psychopy_sounddevice plugin loaded: {success}") - # Force reload sound module - import importlib - importlib.reload(sound) - # setting prefs.hardware['audio_device'] still falls back to a default device, need to use setDevice. - audio_device = prefs.hardware.get('audioDevice', 'default') - if audio_device and audio_device != 'default': - sound.setDevice(audio_device) -else: - #change the pref library to PTB and set the latency mode to high precision - prefs.hardware['audioLib'] = 'PTB' - prefs.hardware['audioLatencyMode'] = 3 + # Force reload sound module + import importlib + importlib.reload(sound) + + # Try to set the audio device if requested and available + audio_device = prefs.hardware.get('audioDevice', 'default') + if audio_device and audio_device != 'default': + if hasattr(sound, 'setDevice'): + try: + sound.setDevice(audio_device) + except Exception as e: + logging.warning(f"Failed to set audio device to '{audio_device}': {e}") + else: + logging.warning(f"sound.setDevice not available, could not set device to '{audio_device}'") + else: + #change the pref library to PTB and set the latency mode to high precision + prefs.hardware['audioLib'] = 'PTB' + prefs.hardware['audioLatencyMode'] = 3 +except ImportError: + import logging + # logging.warning("PsychoPy not found. 
try:
    from .auditory_oddball.aob import AuditoryOddball
except ImportError:
    # Auditory stimulation extras not installed / importable.
    AuditoryOddball = None  # type: ignore


# --- eegnb/experiments/diagnostics.py --------------------------------------
# Pre- and post-experiment diagnostic checks. Used by
# ``eegnb.experiments.Experiment`` to validate the setup (display frame
# rate, electrode contact) before the trial loop starts and to summarise
# recording quality afterwards. Kept out of Experiment.py so that class is
# about *what* the experiment does rather than *how* the setup is validated.

from time import sleep
import pathlib

import numpy as np


# ---------------------------------------------------------------------------
# Monitor (flat display)
# ---------------------------------------------------------------------------

def build_flat_monitor(screen_num=0):
    """Create and persist a PsychoPy ``Monitor`` from detected screen properties.

    Avoids the 'Monitor specification not found' warning that PsychoPy emits
    when a flat ``visual.Window`` can't locate a saved calibration file. The
    monitor is saved to disk so PsychoPy finds it on later runs.

    Note: there is no flat-display equivalent of ``VR.validate_frame_rate`` —
    flat displays deliver their nominal rate reliably, and PsychoPy's
    ``Window.getActualFrameRate`` already measures it when needed.

    Args:
        screen_num: Index of the physical screen whose resolution to use;
            falls back to 1920x1080 when detection fails.

    Returns:
        The saved ``monitors.Monitor`` instance (60 cm viewing distance).
    """
    import logging
    from psychopy import monitors
    from psychopy import logging as psy_logging

    # Monitor.__init__ warns whenever the name has no saved calibration;
    # silence the console logger just for construction.
    console = psy_logging.console if hasattr(psy_logging, 'console') else None
    saved_level = console.level if console else None
    if console:
        console.setLevel(logging.ERROR)

    try:
        monitor = monitors.Monitor('eegnb_auto', autoLog=False)
    finally:
        if saved_level is not None:
            console.setLevel(saved_level)

    monitor.setDistance(60)
    try:
        import pyglet
        screen = pyglet.canvas.Display().get_screens()[screen_num]
        monitor.setSizePix([screen.width, screen.height])
    except Exception:
        monitor.setSizePix([1920, 1080])

    # Persist the specification so PsychoPy finds it on disk next time.
    monitor.save()
    return monitor


# ---------------------------------------------------------------------------
# Pre-experiment signal quality check thresholds
#
# A channel is flagged only when BOTH thresholds are exceeded — keeps the
# warning rate low for normal-but-noisy sessions; only individually broken
# channels (loose electrode, dead reference) are surfaced.
# ---------------------------------------------------------------------------
SIGNAL_NOISE_FLAG_UV = 200.0   # absolute floor: nothing usable above this
SIGNAL_NOISE_REL_FACTOR = 3.0  # relative to montage median
+ The function reads ``n_seconds`` of live signal from the amplifier, + detrends each channel (1-second rolling-mean subtraction so DC drift + doesn't dominate), and computes the standard deviation as a baseline- + noise estimate. + + A channel is flagged only when its noise is BOTH: + - above ``SIGNAL_NOISE_FLAG_UV`` µV in absolute terms, AND + - more than ``SIGNAL_NOISE_REL_FACTOR`` × the group median. + + Both conditions are required so that a session where every channel is + a bit noisy doesn't trigger spurious warnings — only individually + broken channels are surfaced. + + Note: this is *not* an impedance check. Real impedance measurement + requires putting the amplifier into a dedicated test mode and is not + supported by all BrainFlow backends. Baseline noise is a proxy that + works for the common failure mode (loose / dry electrodes). + + Returns: + Dict with: + ``stds`` per-channel detrended std (µV) + ``median`` group median (µV) + ``flagged`` list of ``(channel, std_uv)`` for broken contacts + ``skipped`` True if not run (non-brainflow backend or error) + """ + result = {'stds': {}, 'median': None, 'flagged': [], 'skipped': True} + + if not eeg or getattr(eeg, 'backend', None) != 'brainflow': + return result + + try: + sfreq = int(getattr(eeg, 'sfreq', 250)) + n_samples = sfreq * n_seconds + + eeg._start_brainflow() + sleep(n_seconds) + raw = eeg.board.get_current_board_data(n_samples) + ch_names, eeg_data, _ = eeg._brainflow_extract(raw) + + # Stop so the main eeg.start() can restart cleanly later + eeg.board.stop_stream() + eeg.board.release_session() + eeg_data = np.array(eeg_data) + win = sfreq + for ch_name, x in zip(ch_names, eeg_data): + if len(x) < win: + std = float(np.std(x)) + else: + rolling = np.convolve(x, np.ones(win) / win, mode='same') + std = float(np.std(x - rolling)) + result['stds'][ch_name] = round(std, 1) + + if result['stds']: + med = float(np.median(list(result['stds'].values()))) + result['median'] = round(med, 1) + + # If the 
median itself is huge, the reference/ground is likely bad, + # so the relative check (3x median) will hide the noise. In this case, + # just flag anything over the absolute threshold. + bad_ref_mode = med > SIGNAL_NOISE_FLAG_UV + + for ch, s in result['stds'].items(): + if bad_ref_mode: + if s > SIGNAL_NOISE_FLAG_UV: + result['flagged'].append((ch, s)) + else: + if s > SIGNAL_NOISE_FLAG_UV and s > SIGNAL_NOISE_REL_FACTOR * med: + result['flagged'].append((ch, s)) + + result['skipped'] = False + print(f"[signal-check] {len(ch_names)} ch over {n_seconds}s — " + f"median noise = {result['median']} µV, " + f"flagged: {[c for c, _ in result['flagged']] or 'none'}") + return result + except Exception as e: + print(f"[signal-check] skipped — {e}") + return result + + +# --------------------------------------------------------------------------- +# Post-session report +# --------------------------------------------------------------------------- + +def post_session_report(save_fn): + """Build the recording quality report string for display after a session.""" + from eegnb.analysis.recording_quality import report_session + return report_session(pathlib.Path(save_fn).parent) + + +# --------------------------------------------------------------------------- +# Diagnostics screen formatting +# --------------------------------------------------------------------------- + +def format_diagnostic_warnings(*, device_name=None, display=None, signal_check=None): + """Build warnings for the pre-experiment diagnostics screen. + + Returns a list of strings — empty if everything's fine. + """ + warnings = [] + + if device_name and 'synthetic' in device_name.lower(): + warnings.append( + "[!] SYNTHETIC EEG DEVICE — NO REAL DATA WILL BE RECORDED\n" + " Set eeg_device = EEG(device, ...) in your run script before re-running." + ) + + if display and not display.get('ok', True): + warnings.append( + f"[!] 
DISPLAY WARNING — frame delivery severely degraded\n" + f" Target: {display['target_hz']:.0f} Hz - " + f"Measured: {display['actual_hz']:.1f} Hz " + f"({display['deviation_pct']:.1f}% off)\n" + f" Likely cause: wrong GPU selected, or GPU acceleration disabled.\n" + f" Fix: set python.exe to NVIDIA in NVIDIA Control Panel and\n" + f" Windows Graphics Settings, then restart." + ) + + if signal_check and signal_check.get('flagged'): + n_flagged = len(signal_check['flagged']) + n_total = len(signal_check.get('stds', {})) + ch_info = ", ".join(f"{ch} ({std:.0f} µV)" for ch, std in signal_check['flagged']) + + msg = ( + f"[!] SIGNAL QUALITY WARNING\n" + f" Channels with abnormally high noise: {ch_info}\n" + f" (group median is {signal_check['median']} µV)\n" + ) + + if n_total > 0 and n_flagged >= (n_total / 2.0): + msg += ( + f" Likely cause: Bad Reference (M1) or Ground (A2) connection.\n" + f" When noise is universally high across the head, the shared\n" + f" reference is usually loose or dry. Re-seat M1 and A2." + ) + else: + msg += ( + f" Likely cause: loose electrode or dry paste. Re-seat the\n" + f" listed contact(s) before continuing." 
+ ) + warnings.append(msg) + + return warnings diff --git a/eegnb/experiments/rest/eoec.py b/eegnb/experiments/rest/eoec.py index 38ed467db..b95d196e1 100644 --- a/eegnb/experiments/rest/eoec.py +++ b/eegnb/experiments/rest/eoec.py @@ -136,12 +136,19 @@ def present_stimulus(self, idx: int): else: self.close_sound.play() + self._draw_block_cue(label) + + def _draw_block_cue(self, label): if label == 0: self.fixation.draw() else: self.close_text.draw() self.window.flip() + def present_soa(self, idx: int): + label = self.trials["parameter"].iloc[idx] + self._draw_block_cue(label) + def run(self, instructions: bool = True): try: super().run(instructions) diff --git a/eegnb/experiments/visual_n170/n170.py b/eegnb/experiments/visual_n170/n170.py index 231381ff7..29a8a013d 100644 --- a/eegnb/experiments/visual_n170/n170.py +++ b/eegnb/experiments/visual_n170/n170.py @@ -40,9 +40,9 @@ def present_stimulus(self, idx: int): # Get the label of the trial label = self.trials["parameter"].iloc[idx] # Get the image to be presented - image = choice(self.faces if label == 1 else self.houses) + self._current_image = choice(self.faces if label == 1 else self.houses) # Draw the image - image.draw() + self._current_image.draw() # Pushing the sample to the EEG @@ -64,4 +64,8 @@ def present_stimulus(self, idx: int): self.window.flip() + def present_soa(self, idx: int): + self._current_image.draw() + self.window.flip() + diff --git a/eegnb/experiments/visual_p300/p300.py b/eegnb/experiments/visual_p300/p300.py index d6a8a1df3..8e6974241 100644 --- a/eegnb/experiments/visual_p300/p300.py +++ b/eegnb/experiments/visual_p300/p300.py @@ -38,8 +38,8 @@ def load_stimulus(self): def present_stimulus(self, idx: int): label = self.trials["parameter"].iloc[idx] - image = choice(self.targets if label == 1 else self.nontargets) - image.draw() + self._current_image = choice(self.targets if label == 1 else self.nontargets) + self._current_image.draw() # Push sample if self.eeg: @@ -50,4 +50,8 @@ def 
present_stimulus(self, idx: int): marker = self.markernames[label] self.eeg.push_sample(marker=marker, timestamp=timestamp) + self.window.flip() + + def present_soa(self, idx: int): + self._current_image.draw() self.window.flip() \ No newline at end of file diff --git a/eegnb/experiments/visual_vep/__init__.py b/eegnb/experiments/visual_vep/__init__.py index e69de29bb..514a3492b 100644 --- a/eegnb/experiments/visual_vep/__init__.py +++ b/eegnb/experiments/visual_vep/__init__.py @@ -0,0 +1,10 @@ +"""Visual Evoked Potential (VEP) experiments module. + +This module contains experiments for measuring visual evoked potentials, +including pattern reversal VEP for assessing the P100 component. +""" + +from .grating_vep import VisualGratingVEP +from .pattern_reversal_vep import VisualPatternReversalVEP + +__all__ = ['VisualGratingVEP', 'VisualPatternReversalVEP'] diff --git a/eegnb/experiments/visual_vep/vep.py b/eegnb/experiments/visual_vep/grating_vep.py similarity index 97% rename from eegnb/experiments/visual_vep/vep.py rename to eegnb/experiments/visual_vep/grating_vep.py index 11d076cc4..c661b94c0 100644 --- a/eegnb/experiments/visual_vep/vep.py +++ b/eegnb/experiments/visual_vep/grating_vep.py @@ -5,7 +5,7 @@ from eegnb.devices.eeg import EEG -class VisualVEP(Experiment.BaseExperiment): +class VisualGratingVEP(Experiment.BaseExperiment): def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, diff --git a/eegnb/experiments/visual_vep/pattern_reversal_vep.py b/eegnb/experiments/visual_vep/pattern_reversal_vep.py new file mode 100644 index 000000000..4adfa2d46 --- /dev/null +++ b/eegnb/experiments/visual_vep/pattern_reversal_vep.py @@ -0,0 +1,323 @@ +import logging +import random +import numpy as np + +from psychopy import visual +from typing import Optional, Dict, Any +from eegnb.devices.eeg import EEG +from eegnb.experiments.BlockExperiment import BlockExperiment +from eegnb.analysis.vep_utils import ISCEV_CHECK_DEG_LARGE, ISCEV_CHECK_DEG_SMALL +from 
stimupy.stimuli.checkerboards import contrast_contrast + +# ISCEV PR-VEP standard +ISCEV_FIELD_DEG = 16.0 +ISCEV_MEAN_LUM = 0.0 + +# Block conditions: 4 possible combinations of (eye, size) +CONDITIONS = [ + {'eye': 'left', 'size_name': 'large', 'size_idx': 0, 'check_deg': ISCEV_CHECK_DEG_LARGE}, + {'eye': 'right', 'size_name': 'large', 'size_idx': 0, 'check_deg': ISCEV_CHECK_DEG_LARGE}, + {'eye': 'left', 'size_name': 'small', 'size_idx': 1, 'check_deg': ISCEV_CHECK_DEG_SMALL}, + {'eye': 'right', 'size_name': 'small', 'size_idx': 1, 'check_deg': ISCEV_CHECK_DEG_SMALL}, +] + +# Hierarchical event tags → integer marker codes. The slash-delimited tags let +# MNE epoch by partial match (e.g. event_id key 'rev/left' selects both sizes). +# Kept stable across recordings so analysis can hard-code this dict. +EVENTS = { + **{f"rev/{c['eye']}/{c['size_name']}": 1 + i for i, c in enumerate(CONDITIONS)}, + **{f"block/{c['eye']}/{c['size_name']}": 100 + i for i, c in enumerate(CONDITIONS)}, +} + + +class VisualPatternReversalVEP(BlockExperiment): + + def __init__(self, eeg: Optional[EEG] = None, save_fn=None, + block_duration_seconds: int = 50, + block_trial_size: int = 100, + reps_per_condition: int = 2, + use_vr: bool = False, + use_fullscr: bool = True): + """ + Pattern Reversal VEP with two check sizes, counterbalanced across blocks. + + Block schedule: 4 shuffled conditions (left/right eye, large/small check) × + ``reps_per_condition`` blocks. Block-start markers (100–103) are pushed on + the first reversal of each block to record the condition sequence. 
+ """ + n_conditions = 4 + n_blocks = n_conditions * reps_per_condition + soa = 0.5 + + super().__init__( + "Visual Pattern Reversal VEP", + block_duration_seconds, eeg, save_fn, + block_trial_size, n_blocks, + iti=0, soa=soa, jitter=0, + use_vr=use_vr, use_fullscr=use_fullscr, stereoscopic=True, + ) + + self.instruction_text = ( + f"Welcome to the Pattern Reversal VEP experiment!\n\n" + f"{n_blocks} blocks of {block_duration_seconds} s each.\n" + f"left/right eye × large/small checks)\n\n" + f"Press spacebar or trigger to continue." + ) + + # Build block schedule grouped by eye. + left_eye_blocks = [i for i, c in enumerate(CONDITIONS) if c['eye'] == 'left'] * reps_per_condition + right_eye_blocks = [i for i, c in enumerate(CONDITIONS) if c['eye'] == 'right'] * reps_per_condition + + random.shuffle(left_eye_blocks) + random.shuffle(right_eye_blocks) + + # Randomize which eye goes first + if random.random() < 0.5: + self.block_labels = left_eye_blocks + right_eye_blocks + else: + self.block_labels = right_eye_blocks + left_eye_blocks + + logging.info("[PRVEP] block schedule: %s", self.block_labels) + + # Expand into a per-trial parameter array. 
+ self.parameter = np.array( + [lbl for lbl in self.block_labels for _ in range(block_trial_size)] + ) + + # ------------------------------------------------------------------ + # Stimulus creation helpers + # ------------------------------------------------------------------ + + @staticmethod + def make_checker_image(intensity_checks, check_deg, field_deg=ISCEV_FIELD_DEG, ppd=72): + cpd = 1.0 / (2.0 * check_deg) + return contrast_contrast( + visual_size=(field_deg, field_deg), + ppd=ppd, + frequency=(cpd, cpd), + intensity_checks=intensity_checks, + target_shape=(0, 0), + alpha=0, + tau=0, + )['img'] + + def load_stimulus(self) -> Dict[str, Any]: + refresh_rate = int(np.round( + self.window.displayRefreshRate if self.use_vr + else self.window.getActualFrameRate() + )) + reversals_per_sec = 1 / self.soa + assert refresh_rate % reversals_per_sec == 0, ( + f"Frame rate {refresh_rate} Hz must be an integer multiple of " + f"{reversals_per_sec} Hz reversal rate" + ) + + if self.use_vr: + ppd, _ = self.vr.log_display_info() + logging.info( + "[PRVEP-HMD] optical_axis_ndc=L%+.3f/R%+.3f", + self.left_eye_x_pos, self.right_eye_x_pos, + ) + tex_px = int(round(ISCEV_FIELD_DEG * ppd)) + stim_size_px = (tex_px, tex_px) + else: + ppd = 72 + stim_size_px = (self.window_size[1], self.window_size[1]) + + self.grey_background = visual.Rect( + self.window, + width=self.window.size[0], height=self.window.size[1], + fillColor=[ISCEV_MEAN_LUM] * 3, + ) + self.black_background = visual.Rect( + self.window, + width=self.window.size[0], height=self.window.size[1], + fillColor='black', + ) + + def make_checker_stim(intensity_checks, check_deg, pos): + return visual.ImageStim( + self.window, + image=self.make_checker_image(intensity_checks, check_deg, ppd=ppd), + units='pix', size=stim_size_px, color='white', pos=pos, + ) + + # Fixation cross: explicit '+' polygon so arm length and arm width are + # independent. 
At low VR ppd (~20) the old single-size ShapeStim + # rendered as a ~5 px blob that looked like a diamond and connected + # visually with nearby checkerboard corners (scintillating-grid effect). + # Arm half-length 0.3° keeps the cross inside one check cell at the + # small-check size (0.5°); arm width 0.12° gives a clearly legible + # stroke at VR ppd without occluding foveal stimulation. + FIX_ARM_DEG = 0.30 # half-length from centre to each arm tip + FIX_W_DEG = 0.12 # arm width (stroke thickness) + arm_px = max(6, int(round(FIX_ARM_DEG * ppd))) + w_px = max(2, int(round(FIX_W_DEG * ppd))) + + def _cross_verts(a, w): + h = w / 2 + return [(-h, a), (h, a), (h, h), (a, h), (a, -h), + (h, -h), (h, -a), (-h, -a), (-h, -h), (-a, -h), + (-a, h), (-h, h)] + + def make_fixation(pos_px): + return visual.ShapeStim( + win=self.window, pos=pos_px, + vertices=_cross_verts(arm_px, w_px), + units='pix', closeShape=True, + fillColor=[1, -1, -1], lineColor=[-1, -1, -1], lineWidth=1, + ) + + self.instruction_stim = visual.TextStim( + win=self.window, + text="", + color=[-1, -1, -1], + pos=(0, 0), + height=0.08, + units='norm', + wrapWidth=1.8, + ) + + def make_eye_stimuli(eye_x_pix): + # Two check-size variants per eye: index 0 = large, 1 = small. 
+ return { + 'checkerboards': [ + [ + make_checker_stim((1, -1), ISCEV_CHECK_DEG_LARGE, (eye_x_pix, 0)), + make_checker_stim((-1, 1), ISCEV_CHECK_DEG_LARGE, (eye_x_pix, 0)), + ], + [ + make_checker_stim((1, -1), ISCEV_CHECK_DEG_SMALL, (eye_x_pix, 0)), + make_checker_stim((-1, 1), ISCEV_CHECK_DEG_SMALL, (eye_x_pix, 0)), + ], + ], + 'fixation': make_fixation([eye_x_pix, 0]), + } + + if self.use_vr: + w = self.window.size[0] + return { + 'left': make_eye_stimuli(self.left_eye_x_pos * (w / 2)), + 'right': make_eye_stimuli(self.right_eye_x_pos * (w / 2)), + } + else: + return {'monoscopic': make_eye_stimuli(0)} + + # ------------------------------------------------------------------ + # Block instructions + # ------------------------------------------------------------------ + + def block_eye_and_size(self, block_index: int): + c = CONDITIONS[self.block_labels[block_index]] + return c['eye'], c['size_name'] + + def present_block_instructions(self, current_block: int) -> None: + open_eye, size_name = self.block_eye_and_size(current_block) + closed_eye = 'right' if open_eye == 'left' else 'left' + + # Check if the eye just switched so we can prompt them to move the patch + is_first_block_for_eye = (current_block == 0) or (self.block_eye_and_size(current_block - 1)[0] != open_eye) + + patch_prompt = f"*** MOVE PATCH TO {closed_eye.upper()} EYE NOW ***\n" if is_first_block_for_eye else "" + + if self.use_vr: + # Re-assert height each call — VR state changes (calcEyePoses / + # setBuffer projection) can corrupt the cached norm-unit size on + # the shared instruction_stim, causing oversized text from block 2 + # onwards. Setting .height forces PsychoPy to recompute the glyph + # layout before draw, keeping it consistent across all blocks. 
+ self.instruction_stim.height = 0.08 + self.instruction_stim.wrapWidth = 1.8 + for eye in ['left', 'right']: + self.window.setBuffer(eye) + + if eye == closed_eye: + self.black_background.draw() + self.instruction_stim.color = [1, 1, 1] + else: + self.grey_background.draw() + self.instruction_stim.color = [-1, -1, -1] + + self.instruction_stim.text = ( + f"Block {current_block + 1}/{self.n_blocks} — " + f"{open_eye} eye, {size_name} checks\n\n" + f"{patch_prompt}" + f"Please ensure your {closed_eye} eye is physically covered (e.g. tissue/patch),\n" + f"but keep BOTH eyes open underneath to prevent muscle artifacts.\n\n" + "Focus on the red dot.\n" + "Try not to blink while the squares are animating.\n" + "Press spacebar or trigger when ready." + ) + + self.instruction_stim.draw() + + if eye == open_eye: + self.stim[eye]['fixation'].draw() + else: + text = ( + f"Block {current_block + 1}/{self.n_blocks}\n\n" + f"{patch_prompt}" + f"Cover your {closed_eye} eye with a patch (keep both eyes open).\n" + f"Focus on the red dot with your {open_eye} eye.\n" + f"Check size: {size_name} ({ISCEV_CHECK_DEG_LARGE if size_name == 'large' else ISCEV_CHECK_DEG_SMALL}°)\n\n" + "Press spacebar when ready." + ) + visual.TextStim(win=self.window, text=text, color=[-1, -1, -1]).draw() + self.stim['monoscopic']['fixation'].draw() + self.window.flip() + + # ------------------------------------------------------------------ + # Stimulus presentation + # ------------------------------------------------------------------ + + def present_stimulus(self, idx: int): + # Push block-start marker on the first reversal of each block. + # This lands in the EEG file before the first reversal marker and + # encodes the full condition (eye × check-size) for the analysis. 
+ if idx == 0: + c = CONDITIONS[self.block_labels[self.current_block_index]] + self.push_vr_marker( + EVENTS[f"block/{c['eye']}/{c['size_name']}"], + self.current_block_index * self.block_trial_size, + ) + self.draw_frame(idx) + self.push_marker(idx) + + def draw_frame(self, idx: int): + trial_idx = self.current_block_index * self.block_trial_size + idx + c = CONDITIONS[int(self.parameter[trial_idx])] + eye, size_idx = c['eye'], c['size_idx'] + phase = idx % 2 # alternates 0 / 1 for each reversal + + if self.use_vr: + closed_eye = 'right' if eye == 'left' else 'left' + self.window.setBuffer(eye) + self.grey_background.draw() + self.stim[eye]['checkerboards'][size_idx][phase].draw() + self.stim[eye]['fixation'].draw() + self.window.setBuffer(closed_eye) + self.black_background.draw() + else: + self.grey_background.draw() + self.stim['monoscopic']['checkerboards'][size_idx][phase].draw() + self.stim['monoscopic']['fixation'].draw() + + self.window.flip() + + def push_marker(self, idx: int): + trial_idx = self.current_block_index * self.block_trial_size + idx + c = CONDITIONS[int(self.parameter[trial_idx])] + self.push_vr_marker(EVENTS[f"rev/{c['eye']}/{c['size_name']}"], trial_idx) + + def present_soa(self, idx: int): + # Keep the compositor fed at full frame rate; no marker push. + self.draw_frame(idx) + + def present_iti(self): + if self.use_vr: + for eye in ('left', 'right'): + self.window.setBuffer(eye) + self.grey_background.draw() + else: + self.grey_background.draw() + self.window.flip() diff --git a/environments/eeg-expy-docsbuild.yml b/environments/eeg-expy-docsbuild.yml index 31eae3a6e..b699bbb17 100644 --- a/environments/eeg-expy-docsbuild.yml +++ b/environments/eeg-expy-docsbuild.yml @@ -3,10 +3,11 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.13 + - python >=3.10 # higher versions allowed for experimental builds + - setuptools - pytables # install pytables for macOS arm64, so do not need to build from source. 
- rust # used by docsbuild - pip - pip: # Install package with only Analysis requirements - - -e ..[docsbuild] \ No newline at end of file + - -e ..[docsbuild] diff --git a/environments/eeg-expy-full.yml b/environments/eeg-expy-full.yml index d3c160f01..2e096c6dc 100644 --- a/environments/eeg-expy-full.yml +++ b/environments/eeg-expy-full.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + - python>=3.10,<=3.11 # psychopy 2026.x requires py3.10; higher versions for experimental builds + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - numpy # fix PsychXR numpy dependency DLL issues on Windows - pytables # install pytables for macOS arm64, so do not need to build from source. diff --git a/environments/eeg-expy-stimpres.yml b/environments/eeg-expy-stimpres.yml index de7ed1178..dd6e36096 100644 --- a/environments/eeg-expy-stimpres.yml +++ b/environments/eeg-expy-stimpres.yml @@ -3,7 +3,8 @@ channels: - defaults dependencies: # System-level dependencies - - python>=3.8,<=3.10 # psychopy <= 3.10 + - python >=3.10 # psychopy 2026.x requires py3.10; higher versions allowed for experimental builds + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. 
  - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
  - cffi # Fix sound ffi.callback() issue with sounddevice on macOS: https://github.com/spatialaudio/python-sounddevice/issues/397
diff --git a/environments/eeg-expy-streaming.yml b/environments/eeg-expy-streaming.yml
index 2b7f87af7..800ab4cc6 100644
--- a/environments/eeg-expy-streaming.yml
+++ b/environments/eeg-expy-streaming.yml
@@ -3,7 +3,8 @@ channels:
   - defaults
 dependencies:
   # System-level dependencies
-  - python>=3.8,<=3.13
+  - python >=3.10 # higher versions allowed for experimental builds
+  - setuptools
   - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
   - pip
   - pip:
diff --git a/environments/eeg-expy-streamstim.yml b/environments/eeg-expy-streamstim.yml
index 8ed52571f..4942ff41b 100644
--- a/environments/eeg-expy-streamstim.yml
+++ b/environments/eeg-expy-streamstim.yml
@@ -3,7 +3,8 @@ channels:
   - defaults
 dependencies:
   # System-level dependencies
-  - python>=3.8,<=3.10 # psychopy <= 3.10
+  - python>=3.10,<=3.13 # psychopy 2026.x requires py3.10; higher versions for experimental builds
+  - setuptools
   - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9.
   - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found."
  - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'"
diff --git a/examples/visual_vep/00x__pattern_reversal_run_experiment.py b/examples/visual_vep/00x__pattern_reversal_run_experiment.py
new file mode 100644
index 000000000..5b4b3ae35
--- /dev/null
+++ b/examples/visual_vep/00x__pattern_reversal_run_experiment.py
@@ -0,0 +1,110 @@
+"""
+PRVEP Run Experiment
+==========================================
+
+Pattern Reversal VEP using PsychoPy on a standard monitor or Meta Quest
+headset (via psychxr / Meta-Link). The EEG device and save-file are owned
+by this script; the stimulus is driven by PsychoPy.
+
+Block schedule: 4 conditions (left/right eye × large/small check) ×
+``reps_per_condition`` reps = 8 blocks, shuffled at startup.
+
+Marker codes (1 + condition index, matching CONDITIONS/EVENTS order):
+    1/2 — reversal, left/right eye, large check
+    3/4 — reversal, left/right eye, small check
+
+    Block-start codes (bit 0 = eye, bit 1 = size class):
+    100 — block_start, left eye, large check (~60 arcmin / 1 deg)
+    101 — block_start, right eye, large check
+    102 — block_start, left eye, small check (TODO confirm arcmin: docs disagree (15 vs 30); see ISCEV_CHECK_DEG_SMALL)
+    103 — block_start, right eye, small check
+"""
+
+###################################################################################################
+# Setup
+# ---------------------
+#
+# Imports
+
+import platform
+from os import getenv
+from dotenv import load_dotenv
+load_dotenv()
+
+from eegnb import generate_save_fn
+from eegnb.devices import CYTON_CONFIG_GAIN_4X
+from eegnb.devices.eeg import EEG
+from eegnb.experiments.visual_vep import VisualPatternReversalVEP
+
+###################################################################################################
+# Configuration
+# ---------------------
+#
+# Set your experiment parameters here before running.
+# + +# Display: set use_vr=True for Meta Quest, False for monitor +use_vr = True + +# Device: "cyton", "unicorn", "muse2", etc. +device = "cyton" + +# Serial port: "COM3" for Windows, "/dev/ttyUSB0" for Linux +serial_port = "COM3" + +# Config: CYTON_CONFIG_GAIN_4X needed for Thinkpulse active electrodes, otherwise leave as None. +config = None + +# Electrode montage type: "cap" or "mark-iv" +montage_type = "cap" + +# Ground A2, Ref Fz. +ch_names = ["M1", "Pz", "P7", "P8", "O1", "O2", "Oz", "M2"] + +# Subject and session identifiers +subject_id = 0 +session_nb = 16 + +################################################################################################### +# Initiate EEG device +# --------------------- +# +# Start EEG device based on configuration above. +eeg_device = EEG(device, serial_port=serial_port, ch_names=ch_names, config=config) +#eeg_device = EEG(device="synthetic") + +################################################################################################### +# Build experiment object and detect display settings +# --------------------- +# +# The experiment is constructed before the save path so the Rift session is +# already open and we can read the actual refresh rate from the runtime rather +# than hardcoding it. The save path is then built from the real Hz and set on +# the experiment before run() is called. 
+ +pattern_reversal_vep = VisualPatternReversalVEP( + eeg=eeg_device, + use_vr=use_vr +) + +if use_vr: + _QUEST_HZ = [72, 90, 120] # nominal Meta Quest refresh rates + _raw_hz = pattern_reversal_vep.vr.displayRefreshRate + refresh_rate = min(_QUEST_HZ, key=lambda h: abs(h - _raw_hz)) + display = f"quest-2_{refresh_rate}Hz" +else: + refresh_rate = 100 # flat display fallback — update for your monitor + display = f"acer-34-predator_{refresh_rate}Hz" + +site = f"{display}_{montage_type}" +data_dir = getenv("DATA_DIR") +save_fn = generate_save_fn(eeg_device.device_name, + experiment="visual-PRVEP", + site=site, + subject_id=subject_id, + session_nb=session_nb, + data_dir=data_dir) +print(f"Saving data to: {save_fn} (detected {refresh_rate} Hz)") +pattern_reversal_vep.save_fn = save_fn + +pattern_reversal_vep.run() diff --git a/examples/visual_vep/01r__pattern_reversal_viz.py b/examples/visual_vep/01r__pattern_reversal_viz.py new file mode 100644 index 000000000..03b6c920f --- /dev/null +++ b/examples/visual_vep/01r__pattern_reversal_viz.py @@ -0,0 +1,2010 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:sphinx +# text_representation: +# extension: .py +# format_name: sphinx +# format_version: '1.1' +# jupytext_version: 1.19.1 +# kernelspec: +# display_name: eeg-experiments +# language: python +# name: python3 +# --- + +""" + +# Pattern Reversal VEP: Load and Visualize + +This example demonstrates loading, organizing, and visualizing evoked response +data from the Visual Pattern Reversal VEP (PR-VEP) experiment. + +An animation of a checkerboard reversal is shown (the checkerboard squares' +colours are toggled once each half a second). Stimulus is rendered stereoscopically +through a Meta Quest HMD, with synchronization markers (triggers) sent to the OpenBCI Cyton. + +The data used is recorded using an OpenBCI Cyton with a Tencom 20 channel cap, +with cup electrodes placed at M1 reference, Fz, Pz, P7, P8, O1, O2, Oz, M2 and an ear-clip on A2 for ground. 
+ +Per-trial PC-side latency correction is applied using ``app_motion_to_photon_latency_s`` +from the LibOVR compositor frame stats sidecar, and the residual Quest Link + +panel lag is handled by a fixed ``link_panel_lag`` constant. + +**12 biomarkers** are computed, grouped into three sections: + +- **Pre-chiasmatic / optic nerve** (BM1–BM5): inter-ocular latency difference + (IOLD), per-size IOLD, spatial-frequency slope, amplitude ratio, bootstrap CIs. +- **Morphological** (BM6): W-peak / bifurcated P100 detection. +- **Post-chiasmatic / cortical** (BM7–BM12): hemispheric O1/O2 asymmetry, + inter-ocular Δ-asymmetry, lateral P7/P8, lateral Δ-asymmetry, composite + hemispheres, topology QC. + +Results are persisted to ``biomarkers.json`` in the recording directory and +consumed by ``02r__pattern_reversal_longitudinal.py`` for trend analysis. + +""" + +############################################################################### +# ## Setup +# +# + +import os +import json +import datetime +import numpy as np +import warnings +warnings.filterwarnings('ignore') + +import matplotlib.pyplot as plt +plt.ion() +import pandas as pd + +from scipy.stats import trim_mean +from scipy.signal import find_peaks + +from mne import Epochs, EvokedArray, find_events, concatenate_raws + +from eegnb import get_recording_dir +from eegnb.analysis.utils import load_csv_as_raw +from eegnb.datasets import fetch_dataset +from eegnb.analysis.vep_utils import ( + get_pr_vep_latencies, + ISCEV_CHECK_DEG_LARGE, ISCEV_CHECK_DEG_SMALL, + IOLD_FLAG_MS, LOG2_AMP_FLAG, + trimmed_average, json_safe_float, + compute_iold, compute_iold_per_size, compute_amplitude_ratio, compute_check_size_slope, + bootstrap_p100_latency, compute_hemi_asymmetry, compute_hemi_delta_asymmetry, +) +_f = json_safe_float +from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS + +# sphinx_gallery_thumbnail_number = 3 + +############################################################################### +# ## Hardware lag 
definitions (PsychoPy / Meta-Link path) +# +# +# Meta-Link path total flip-return → photon delay on this rig is split into a measured +# part and an unmeasured residual: +# +# - **Measured per-trial (PC side)**: ``app_motion_to_photon_latency_s`` from +# LibOVR frame stats, applied trial-by-trial below. +# - **Residual fit (link_panel_lag)**: Quest Link video encode/decode + USB transport +# + panel scan-out + LCD response. Rough budgets from public benchmarks: +# Link transport ≈ 20–40 ms, Panel + LCD ≈ 10–20 ms, total range ≈ 30–60 ms. +# The Cyton RF transmission adds a further unmeasured ~1–5 ms (proprietary stack, +# not standard BLE). +# +# +# + +############################################################################### +# ## Stimulus calibration caveat -- Quest panel is not ISCEV-calibrated +# +# Clinical PR-VEP norms (P100 ~100 ms, ~10-20 uV at Oz-Fz on adults) are +# derived from photometrically-spec'd CRT or LCD monitors at a fixed +# viewing distance. The Quest setup deviates on the panel-photometry +# axis -- the Quest 2 fast-switching LCD is not calibrated to a specific +# cd/m^2 or contrast ratio. Cortical drive (and therefore P100 amplitude) +# scales with these, so absolute amplitudes will diverge from clinical +# norms by a constant factor. +# +# Fortunately, this is still highly effective for differential analysis! +# Because the display characteristics are stable, within-subject comparisons +# (like inter-ocular differences) remain extremely robust. Furthermore, the +# fast-switching LCD provides excellent temporal precision, yielding highly +# reliable latency measurements despite the absolute amplitude shift. +# +# Field & check size are NOT a calibration concern on this path. The +# stimulus is rendered at a runtime-derived PPD (Pixels Per Degree at +# field centre) read from the OVR runtime each session via +# ``Rift.pixelsPerTanAngleAtCenter`` -- not estimated from a spec sheet. 
+# IPD (Inter-Pupillary Distance, the distance between the user's pupils +# in mm) is similarly read from the runtime via ``eyeToNoseDistance`` and +# is already baked into the per-eye projection matrices PsychoPy uses, +# so it does not bias the angular extent of stimulus content. Both PPD +# and IPD are written into the ``_timing.csv`` header at session start +# (see ``log_display_info`` in ``eegnb/devices/vr.py``), and +# the stimupy checkerboard is then sized by a prescribed degrees-per-check +# value (1.0 deg = 60 arcmin = ISCEV "large", 0.25 deg = 15 arcmin = +# ISCEV "small"). Compositor barrel-distortion correction is applied +# downstream of the rendered eye-buffer texture and does not enter the +# calculation. The residual uncertainty (much smaller than a spec-sheet +# estimate) comes from eye relief -- how far the user's pupil sits from +# the lens, which slightly biases off-axis angular size and peripheral +# vignetting -- and per-unit lens manufacturing variance. Both affect +# peripheral checks more than the foveal ones that drive P100, so for +# ISCEV-relevant central-field analysis the residual is well under 1%. +# +# Expected morphology differences (to characterise as more recordings come in): +# - Larger early negative deflections (N75-ish) plausible if panel contrast +# / field size drives stronger extrastriate contribution than clinical +# norms assume. +# - Latency offset ~10-20 ms on the Meta-Link path. +# +# Absolute P100 latency / amplitude here are NOT interchangeable with +# clinical PR-VEP norms. Differential biomarkers (IOLD, slope, amplitude +# ratio) are robust to these confounds and remain interpretable. +# +# +# + +############################################################################### +# ## Bracketing / replacing ``link_panel_lag`` empirically +# +# The 25 ± 15 ms residual below is a budgeted estimate, not a measurement. 
+# Differential biomarkers (IOLD, slopes, ratios, Δ-asymmetry) are robust to +# this offset because both eyes share the same path, so the residual cancels +# in any L−R contrast. Absolute P100 latency (vs clinical norms) does NOT +# survive the residual and should not be reported as a clinical number until +# the residual is pinned down. Two paths to do that, in increasing order of +# rigour: +# +# 1. **Software baseline (error-prone, free).** Run a session on a control +# subject with intact optic pathways and compare the measured P100 (after +# PC-side correction only, ``link_panel_lag = 0``) to the clinical norm +# (~100 ms at 60 arcmin on a calibrated CRT). The shift between observed +# and norm is the residual. Caveats: the Quest panel isn't ISCEV-calibrated +# so contrast/luminance will perturb absolute latency by an unknown amount +# on top of the chain delay; controls vary ±5–10 ms at baseline; and any +# one subject's value is noisy. Useful to *bracket* the residual to within +# ~10 ms but not to certify it. +# +# 2. **Photodiode / optode (gold standard).** Tape a photodiode onto one HMD +# eye lens facing the panel and route its analogue output to a Cyton aux +# channel (or a second trigger line). The diode fires when actual photons +# arrive at the eye — i.e. measures the entire chain (PsychoPy flip → +# Quest Link → panel scan-out → LCD response) in a single sample. With a +# photodiode trigger present, ``link_panel_lag`` becomes 0 by construction +# and the per-trial PC-side correction is no longer needed either: +# epoching off the diode event aligns trials to actual stimulus onset +# with sub-frame precision. This makes absolute P100 latency a usable +# biomarker rather than an estimate. The native-Quest absolute-latency +# target supersedes this on its release path; for the meta-link path here, +# a diode would close the gap immediately. 
+# +# ############################################################################## + +# Center of estimated unmeasured residual range (s) +link_panel_lag = 0.025 +# ± half-range (s) +link_panel_lag_err = 0.015 + +############################################################################### +# ## Load Data +# +# Load all recordings for the session and concatenate into a single raw object. +# The timing sidecar CSV is parsed per-file and concatenated to match events. +# + + +# --- CHANGE THESE PLACEHOLDERS TO POINT AT YOUR OWN RECORDING --------------- +SUBJECT_ID = 0 +SESSION_NB = 16 +DEVICE_NAME = 'cyton' +EXPERIMENT = 'visual-PRVEP' +DISPLAY = 'quest-2_120Hz' +MONTAGE = 'cap' +SITE = f'eegnb_examples/{DISPLAY}_{MONTAGE}' +# From session 016 the Fz cup moved to the SRB pin (hardware reference) and +# the old M1 cup moved to channel 1. Applied per-recording at load time so +# recordings with the old CSV header ('Fz') and new ('M1') can be concatenated. +CH_REMAP = {'Fz': 'M1'} if SESSION_NB >= 16 else {} +# Minimum recording duration — skips short setup/restart runs. +# Move unwanted longer recordings to bad_recordings/ in the session directory; +# the glob will not find them there. 
MIN_RECORDING_SECS = 120
# ---------------------------------------------------------------------------

# Cyton sample rate in Hz — used both to screen recording duration from raw
# CSV row counts and when loading the CSVs, so the two can never drift apart.
SFREQ = 250

eegnb_data_path = os.path.join(os.path.expanduser('~/'), '.eegnb', 'data')
prvep_data_path = os.path.join(eegnb_data_path, EXPERIMENT, 'eegnb_examples')

if not os.path.isdir(prvep_data_path):
    print("Downloading PR-VEP example dataset from Google Drive...")
    fetch_dataset(data_dir=eegnb_data_path, experiment=EXPERIMENT, site='eegnb_examples')

recording_dir = get_recording_dir(DEVICE_NAME, EXPERIMENT, SUBJECT_ID, SESSION_NB, site=SITE)
print(f"[data] recording dir: {recording_dir}")

# EEG CSVs only — '*_timing.csv' sidecars are matched up per-file below.
all_files = sorted(p for p in recording_dir.glob('*.csv') if not p.stem.endswith('_timing'))
print(f"[data] found {len(all_files)} EEG recording(s): {[p.name for p in all_files]}")

recording_files = []
for p in all_files:
    timing_path = p.with_name(p.stem + '_timing.csv')
    if not timing_path.exists():
        print(f"[skip] No timing sidecar: {p.name}")
        continue
    # Count data rows (minus the header row) with the handle properly closed —
    # a bare open() here previously leaked the file handle until GC.
    with p.open() as fh:
        n_rows = sum(1 for _ in fh) - 1
    dur_secs = n_rows / SFREQ
    if dur_secs < MIN_RECORDING_SECS:
        print(f"[skip] Too short ({dur_secs:.0f}s < {MIN_RECORDING_SECS}s): {p.name}")
        continue
    recording_files.append(p)

print(f"[data] using {len(recording_files)} recording(s): {[p.name for p in recording_files]}")

# Load each recording with its timing sidecar. Events and timing rows are
# truncated to the shorter of the two so they stay index-aligned per file.
per_recording = []
for p in recording_files:
    timing_path = p.with_name(p.stem + '_timing.csv')

    rec_raw = load_csv_as_raw([str(p)], sfreq=SFREQ, ch_ind=EEG_INDICES['cyton'],
                              aux_ind=None, replace_ch_names=None, verbose=False)

    # Harmonise channel naming across the session-016 montage change so old
    # ('Fz') and new ('M1') recordings can be concatenated together.
    if CH_REMAP:
        remap = {k: v for k, v in CH_REMAP.items() if k in rec_raw.ch_names}
        if remap:
            rec_raw.rename_channels(remap)

    rec_timing = pd.read_csv(timing_path, comment='#').reset_index(drop=True)
    rec_events = find_events(rec_raw, shortest_event=1, verbose=False)

    n = min(len(rec_events), len(rec_timing))
    if len(rec_events) != len(rec_timing):
        print(f"[warn] {p.name}: events={len(rec_events)}, timing={len(rec_timing)} — truncating to {n}")

    per_recording.append({
        'raw': rec_raw,
        'events': rec_events[:n],
        'timing': rec_timing.iloc[:n].reset_index(drop=True),
    })

if not per_recording:
    raise RuntimeError(
        f"No recordings loaded from {recording_dir}. "
        "Check SUBJECT_ID, SESSION_NB, DEVICE_NAME, and MIN_RECORDING_SECS."
    )

# concatenate_raws shifts each file's event sample indices into the
# concatenated timebase, so events stay aligned with the concatenated timing.
raw, events = concatenate_raws(
    [rec['raw'] for rec in per_recording],
    events_list=[rec['events'] for rec in per_recording],
)
timing_df = pd.concat([rec['timing'] for rec in per_recording], ignore_index=True)
assert len(events) == len(timing_df), "per-file truncation should keep events and timing aligned"

print(f"\n[raw] sfreq={raw.info['sfreq']} Hz, n_samples={raw.n_times}, duration={raw.times[-1]:.1f}s")
print(f"[raw] channels: {raw.ch_names}")

###############################################################################
# ## Recording quality diagnostic
#
# Two-stage contact quality check:
#
# 1. **Raw CSV (here)** — std / drift / p99 per channel directly from the recorded CSV, before any MNE processing. Mean-subtracts before computing metrics so DC offset does not inflate the flags. Detects whether flagged channels are isolated contacts or shared across all channels (loose M1/SRB reference).
# 2. **Post-epoch baseline (below, after epoching)** — pre-stimulus baseline RMS per channel after filtering + referencing. Absolute values are interpretable here; provides SNR at Oz and a go/no-go recommendation.

import sys, pathlib
# NOTE(review): '_dh' is an IPython/Jupyter global (directory history) — this
# cell presumably only runs inside a notebook/jupytext session; confirm it is
# never executed as a plain script.
sys.path.insert(0, str(pathlib.Path(globals()['_dh'][0]).resolve().parents[3]))
from eegnb.analysis.recording_quality import check_session

_rq = check_session(recording_dir)
print(_rq['report'])

if _rq['shared_ref_suspect']:
    print()
    print("=" * 60)
    print("⚑ SHARED REFERENCE SUSPECT (M1/SRB loose)")
    print(" All-channel noise inflation detected.")
    print(" Every channel recorded through this reference is")
    print(" compromised. Biomarkers that depend on absolute")
    print(" amplitude or inter-channel ratios are unreliable.")
    print(" Re-seat M1 and re-record before trusting results.")
    print("=" * 60)
elif _rq['flagged_channels']:
    print(f"\n⚑ Flagged channels: {', '.join(_rq['flagged_channels'])}")
    print(" Isolated contact issue(s) — other channels are ok.")
else:
    print("\nAll channels within normal range — contact quality ok.")

###############################################################################
# ## Visualize the power spectrum
#
#

raw.plot_psd()

###############################################################################
# ## Filtering
#
# Use FIR (linear phase) rather than IIR to avoid frequency-dependent group delay,
# which would shift the P100 peak by an amount that depends on its spectral content,
# contaminating latency measurements. MNE's zero-phase FIR cancels even the constant
# delay so the filtered P100 sits at the same sample as the unfiltered one.
# Using ISCEV bandpass standard: 1–100 Hz.
#
#
#

hp, lp = 1, 100
raw.filter(hp, lp, method='fir')

###############################################################################
# ## Per-trial PC-side photon-latency correction
#
# Each event sample index is shifted by the per-trial measured
# ``app_motion_to_photon_latency_s`` from LibOVR frame stats — a retrospective
# measurement of how long the frame actually took to reach the compositor/vsync.
#
# Missing trials (typically the first frame or two before perf stats are populated)
# fall back to the session mean.
+# +# +# + +pc_lag_s = timing_df['app_motion_to_photon_latency_s'].values.astype(float) +valid = np.isfinite(pc_lag_s) & (pc_lag_s > 0) +if (~valid).any(): + fallback = pc_lag_s[valid].mean() if valid.any() else 0.0 + print(f"[warn] {int((~valid).sum())}/{len(pc_lag_s)} trials missing " + f"app_motion_to_photon_latency_s — using mean fallback {fallback*1000:.2f} ms") + pc_lag_s = np.where(valid, pc_lag_s, fallback) + +sample_shifts = np.round(pc_lag_s * raw.info['sfreq']).astype(int) +print(f"\n[pc-lag] app_motion_to_photon_latency_s (ms): " + f"min={pc_lag_s.min()*1000:.2f} " + f"max={pc_lag_s.max()*1000:.2f} " + f"mean={pc_lag_s.mean()*1000:.2f} " + f"std={pc_lag_s.std()*1000:.2f} " + f"|shift| samples: max={np.abs(sample_shifts).max()}") + +# Per-trial PC-side-corrected event array. +events_corrected = events.copy() +events_corrected[:, 0] += sample_shifts + +############################################################################### +# ## Hardware lag breakdown chart +# +# + +pc_pipeline_lag = pc_lag_s.mean() * 1000 +unmeasured_lag = link_panel_lag * 1000 + +fig_lag, ax_lag = plt.subplots(figsize=(8, 4)) +y_pos = 0 + +ax_lag.barh(y_pos, pc_pipeline_lag, color='#4c72b0', edgecolor='white', + label=f'PC Pipeline (measured): {pc_pipeline_lag:.1f} ms') +ax_lag.barh(y_pos, unmeasured_lag, left=pc_pipeline_lag, color='#c44e52', edgecolor='white', + label=f'Quest Link + Panel + Cyton RF (unmeasured): {unmeasured_lag:.1f} ms') + +ax_lag.set_yticks([]) +ax_lag.set_xlabel('Latency from Trigger (ms)') +ax_lag.set_title('Composition of VEP Hardware Lag') + +ax_lag.errorbar(pc_pipeline_lag / 2, y_pos, xerr=pc_lag_s.std() * 1000, + color='#aec6e8', capsize=5, lw=2, label='Measured Variance (±1 SD)') +ax_lag.errorbar(pc_pipeline_lag + (unmeasured_lag / 2), y_pos, + xerr=link_panel_lag_err * 1000, + color='black', capsize=5, lw=2, + label=f'Unmeasured Uncertainty (±{link_panel_lag_err*1000:.0f}ms)') + +handles, labels = ax_lag.get_legend_handles_labels() 
+fig_lag.legend(handles, labels, loc='lower center', bbox_to_anchor=(0.5, 0.0), + ncol=2, fontsize=8, frameon=True) +fig_lag.subplots_adjust(bottom=0.38) + +############################################################################### +# ## Condition decoding +# +# Marker scheme: +# Reversal codes carry both eye and size (1–4). +# Block-start markers (100-103) are pushed at the start of each block but are redundant for trial epoching. +# +# Condition codes: + +COND_TO_INT = { + ('left_eye', 'large'): 1, + ('right_eye', 'large'): 2, + ('left_eye', 'small'): 3, + ('right_eye', 'small'): 4, +} + +# Drop block-start markers (100-103), keeping only the actual reversal markers (1-4). +mask = np.isin(events[:, 2], list(COND_TO_INT.values())) +events = events[mask] +events_corrected = events_corrected[mask] + +############################################################################### +# ## Epoching parameters +# + +event_id = {f"{eye}/{size}": code for (eye, size), code in COND_TO_INT.items()} +for cond, cid in event_id.items(): + n = int((events[:, 2] == cid).sum()) + print(f"[events] {cond}: {n}") + +PICK_CH = 'Oz' # ISCEV-standard electrode +HEMI_CHANNELS = ['O1', 'O2'] # Hemispheric channels for post-chiasmatic analysis +LATERAL_CHANNELS = ['P7', 'P8'] # Lateral extrastriate (V2/V3/MT). Generators on the + # lateral cortical surface project mostly ipsilaterally + # to the scalp, so P7/P8 are far less affected by the + # paradoxical lateralization that makes O1/O2 ambiguous + # for hemispheric localization. This makes P7/P8 a + # relatively direct readout of unilateral lateral- + # occipital / parieto-occipital cortex — useful for + # detecting localized retro-chiasmatic / extrastriate + # involvement (cortical lesions, focal hypofunction) + # that Oz / O1 / O2 alone cannot cleanly side-localize. +# Topology QC channels: Pz gradient + Fz Halliday inversion. 
Fz is absent +# when it is the hardware reference (session 016+), so filter against what's +# actually recorded to avoid pick errors downstream. +TOPOLOGY_CHANNELS = [ch for ch in ['Pz', 'Fz'] if ch in raw.ch_names] +ALL_PICK_CHANNELS = [PICK_CH] + HEMI_CHANNELS + LATERAL_CHANNELS + TOPOLOGY_CHANNELS +REJECT_UV = 150e-6 +BASELINE = (-0.1, 0) + +LANDMARK_MS = [75, 100, 145] # N75, P100, N145 +LANDMARK_COLORS = ['#888888', 'green', '#555555'] +LANDMARK_LABELS = ['N75 (75 ms)', 'P100 (100 ms)', 'N145 (145 ms)'] + +P100_WIN_MS = (60, 160) # P100 search window (positive max) +CHECK_SIZE_ARCMIN = { + 'large': ISCEV_CHECK_DEG_LARGE * 60.0, # 60 arcmin + 'small': ISCEV_CHECK_DEG_SMALL * 60.0, # 15 arcmin +} + +############################################################################### +# ## Reference scheme selector +# +# Pick which EEG reference to analyse in this notebook run. Re-run the notebook +# with the other value to also persist its biomarkers (the persistence cell at +# the bottom merges into ``biomarkers.json`` rather than overwriting). +# +# - **Fz (ISCEV strict)**: Oz-Fz derivation. The ISCEV 2016 PR-VEP standard +# recommends a mid-frontal reference (Fz), so this scheme is the choice for +# direct comparison against published clinical norms. Trade-off: very +# sensitive to Fz contact quality on dry-electrode rigs -- one bad Fz +# contact can subtract artifact into Oz and invert P100 polarity. +# - **Linked Mastoid M1+M2**: ISCEV lists ear/mastoid as an acceptable +# alternative reference; widely used in cognitive-neuroscience ERP work +# because mastoids sit on bony prominence with less EMG and are far less +# contact-noise-prone than Fz on dry actives. Absolute P100 latency / +# amplitude differ slightly from the Fz-referenced waveform, so direct +# comparison to Fz-referenced clinical norms is approximate; differential +# biomarkers (IOLD, slope, asymmetry) are reference-invariant and remain +# directly comparable. 
Required (not optional) for Biomarker 12's Halliday +# Fz-inversion check, which needs Fz as a recorded channel rather than the +# reference. +# +# M1 is the Cyton hardware reference (SRB pin), so stored channel data is +# already relative to M1. A zero-valued M1 channel is synthesised and averaged +# with the recorded M2, giving Oz-(M1+M2)/2 after the algebra. +# + +REF_SCHEME = 'Fz (ISCEV)' # 'Fz (ISCEV)' or 'Linked Mastoid M1+M2' + +if REF_SCHEME == 'Fz (ISCEV)': + raw_ref = raw.copy() + if 'Fz' in raw_ref.ch_names: + # Fz is a recorded channel — subtract it as software reference + raw_ref.set_eeg_reference(ref_channels=['Fz']) + else: + # Fz is the hardware SRB — data already in Fz space, no software step needed + print("[ref] Fz is hardware reference — no software re-reference applied") +elif REF_SCHEME == 'Linked Mastoid M1+M2': + raw_ref = raw.copy() + if 'M1' not in raw_ref.ch_names: + # M1 is the SRB (hardware reference) — synthesise it as zero so the + # algebra (channel − M2/2) approximates linked mastoid + m1_zero = raw_ref.copy().pick(['M2']) + m1_zero._data[:] = 0 + m1_zero.rename_channels({'M2': 'M1'}) + raw_ref.add_channels([m1_zero]) + # M1 is a real recorded channel (session 016+) or the synthesised zero above + raw_ref.set_eeg_reference(ref_channels=['M1', 'M2']) +else: + raise ValueError(f'Unknown REF_SCHEME: {REF_SCHEME!r}') + +ref_label = REF_SCHEME +results = {'ref_label': ref_label} +print(f"\n{'='*60}\nReference: {ref_label}\n{'='*60}") + +raw_ref.compute_psd(fmin=hp, fmax=lp).plot() + +############################################################################### +# ## Epoching +# + +ch_epochs = Epochs(raw_ref, events=events, event_id=event_id, + tmin=-0.1, tmax=0.4, baseline=BASELINE, + reject={'eeg': REJECT_UV}, + preload=True, verbose=False, picks=ALL_PICK_CHANNELS, + event_repeated='drop') +ch_epochs.shift_time(-link_panel_lag) + +n_total = len(ch_epochs) +drop_pct = (1 - n_total / len(events)) * 100 +print(f"\n[{PICK_CH}] reject 
ptp={REJECT_UV * 1e6:.0f} uV " + f"kept {n_total}/{len(events)} ({drop_pct:.1f}% dropped)") +results['n_trials_total'] = int(len(events)) +results['n_trials_kept'] = int(n_total) +results['drop_pct'] = _f(drop_pct) +results['n_per_condition'] = { + cond: int((events[:, 2] == cid).sum()) for cond, cid in event_id.items() +} + +# Corrected-events epochs on the same kept trials. +ch_epochs_corr = Epochs(raw_ref, events=events_corrected[ch_epochs.selection], + event_id=event_id, tmin=-0.1, tmax=0.4, baseline=BASELINE, + reject=None, preload=True, verbose=False, picks=ALL_PICK_CHANNELS, + event_repeated='drop') +ch_epochs_corr.shift_time(-link_panel_lag) + +def avg_eyes(ep, eye_prefix): + keys = [k for k in event_id if k.startswith(eye_prefix)] + return trimmed_average(ep[keys]) if keys else None + +# ========================================================================= +# WAVEFORM PLOTS +# ========================================================================= + +"" +############################################################################### +# Stage 2 — Post-epoch baseline quality (filtered + referenced) +# Absolute values are meaningful here. Baseline window: -100 to 0 ms. 
###############################################################################

# Stage-2 QC thresholds.
BASELINE_WIN = (-0.1, 0.0)   # pre-stimulus window (s) used for baseline RMS
NOISE_FACTOR_EP = 1.5        # flag if channel baseline RMS > this × group median
OZ_SNR_MIN = 2.0             # flag if Oz P100 SNR falls below this

baseline_mask = (ch_epochs_corr.times >= BASELINE_WIN[0]) & \
                (ch_epochs_corr.times <= BASELINE_WIN[1])

baseline_data = ch_epochs_corr.get_data()[:, :, baseline_mask]  # (epochs, ch, times)
# RMS over epochs × time per channel, converted to µV.
baseline_rms = np.sqrt(np.mean(baseline_data ** 2, axis=(0, 2))) * 1e6

ch_names_ep = ch_epochs_corr.ch_names
med_rms = float(np.median(baseline_rms))

print("Stage 2 — Baseline RMS per channel (post-filter, post-reference, -100–0 ms)")
print(f"{'Channel':<8} {'RMS µV':>8} {'Factor':>8} Status")
print("-" * 44)
quality_flags = {}
for ch, rms in zip(ch_names_ep, baseline_rms):
    factor = rms / med_rms
    flag = factor > NOISE_FACTOR_EP
    quality_flags[ch] = {'rms_uv': round(float(rms), 2), 'flag': flag}
    print(f"{ch:<8} {rms:>8.2f} {factor:>8.2f} {'⚑ FLAG' if flag else 'ok'}")

# Oz SNR: best detected P100 / Oz baseline RMS
oz_idx = ch_names_ep.index('Oz')
oz_rms = baseline_rms[oz_idx]
print(f"\nOz baseline RMS = {oz_rms:.2f} µV group median = {med_rms:.2f} µV")

flagged_chs = [ch for ch, v in quality_flags.items() if v['flag']]
if flagged_chs:
    print(f"\n⚑ Noisy channels (>{NOISE_FACTOR_EP}× median): {flagged_chs}")
    if len(flagged_chs) == len(ch_names_ep):
        # Fixed: the shared hardware reference is M1/SRB (see the stage-1
        # report and the reference-scheme notes above), not M2/SRB.
        print(" All channels elevated → shared reference (M1/SRB) is likely the cause.")
    else:
        print(" Subset of channels → individual electrode contact issue(s).")
else:
    print(f"\nAll channels within {NOISE_FACTOR_EP}× median baseline — contact quality ok.")

###############################################################################
# ## Oz evoked: left vs right eye
#
# Solid lines: per-trial PC lag corrected. Dotted lines: mean-corrected baseline.
# Shaded regions: ±1 SEM across trials.
+# + +evoked_left_large = trimmed_average(ch_epochs['left_eye/large']) +evoked_right_large = trimmed_average(ch_epochs['right_eye/large']) +evoked_left_small = trimmed_average(ch_epochs['left_eye/small']) +evoked_right_small = trimmed_average(ch_epochs['right_eye/small']) + +idx_oz = evoked_left_large.ch_names.index(PICK_CH) + +times = evoked_left_large.times * 1000 +left_data_large = evoked_left_large.data[idx_oz] * 1e6 +right_data_large = evoked_right_large.data[idx_oz] * 1e6 +left_data_small = evoked_left_small.data[idx_oz] * 1e6 +right_data_small = evoked_right_small.data[idx_oz] * 1e6 + +times_mean_corr = times - (pc_lag_s.mean() * 1000) + +evoked_left_corr_large = trimmed_average(ch_epochs_corr['left_eye/large']) +evoked_right_corr_large = trimmed_average(ch_epochs_corr['right_eye/large']) +left_corr_large = evoked_left_corr_large.data[idx_oz] * 1e6 +right_corr_large = evoked_right_corr_large.data[idx_oz] * 1e6 + +evoked_left_corr_small = trimmed_average(ch_epochs_corr['left_eye/small']) +evoked_right_corr_small = trimmed_average(ch_epochs_corr['right_eye/small']) +left_corr_small = evoked_left_corr_small.data[idx_oz] * 1e6 +right_corr_small = evoked_right_corr_small.data[idx_oz] * 1e6 + +left_trials_large = ch_epochs_corr['left_eye/large'].get_data(picks=[PICK_CH])[:, 0, :] * 1e6 +right_trials_large = ch_epochs_corr['right_eye/large'].get_data(picks=[PICK_CH])[:, 0, :] * 1e6 +left_sem_large = left_trials_large.std(axis=0) / np.sqrt(len(left_trials_large)) +right_sem_large = right_trials_large.std(axis=0) / np.sqrt(len(right_trials_large)) + +left_trials_small = ch_epochs_corr['left_eye/small'].get_data(picks=[PICK_CH])[:, 0, :] * 1e6 +right_trials_small = ch_epochs_corr['right_eye/small'].get_data(picks=[PICK_CH])[:, 0, :] * 1e6 +left_sem_small = left_trials_small.std(axis=0) / np.sqrt(len(left_trials_small)) +right_sem_small = right_trials_small.std(axis=0) / np.sqrt(len(right_trials_small)) + +# Size-averaged evokeds used as primary input for most 
biomarkers. +evoked_left_corr_avg = avg_eyes(ch_epochs_corr, 'left_eye') +evoked_right_corr_avg = avg_eyes(ch_epochs_corr, 'right_eye') +n75_left, p100_left, n145_left = get_pr_vep_latencies(evoked_left_corr_avg.copy().pick([PICK_CH])) +n75_right, p100_right, n145_right = get_pr_vep_latencies(evoked_right_corr_avg.copy().pick([PICK_CH])) + +for eye_name, peaks in [('Left Eye (Avg)', (n75_left, p100_left, n145_left)), + ('Right Eye (Avg)', (n75_right, p100_right, n145_right))]: + for peak in peaks: + if peak is not None: + print(f"[{eye_name}] {peak['name']} Peak: " + f"{round(peak['amplitude']*1e6, 2)} µV at " + f"{round(peak['latency']*1e3, 2)} ms (ch={peak['channel']})") + + n75, p100, n145 = peaks + if n75 is not None and p100 is not None: + ptp_1 = (p100['amplitude'] - n75['amplitude']) * 1e6 + print(f"[{eye_name}] N75-P100 Peak-to-Peak: {ptp_1:.2f} µV") + if p100 is not None and n145 is not None: + ptp_2 = (p100['amplitude'] - n145['amplitude']) * 1e6 + print(f"[{eye_name}] P100-N145 Peak-to-Peak: {ptp_2:.2f} µV") + if n75 is not None and p100 is not None and n145 is not None: + print(f"[{eye_name}] Total N75-P100-N145 Energy: {ptp_1 + ptp_2:.2f} µV") + +def plot_ch(ax, data, color, eye_label, sem=None, data_mean_corr=None, times_mean_corr=None): + ax.plot(times, data, color=color, linewidth=2, label=f'{eye_label} (Per-trial corrected)') + if data_mean_corr is not None and times_mean_corr is not None: + ax.plot(times_mean_corr, data_mean_corr, color=color, linestyle=':', alpha=0.6, + linewidth=1.6, label=f'{eye_label} (Mean corrected)') + if sem is not None: + ax.fill_between(times, data - sem, data + sem, color=color, alpha=0.25, + label=f'{eye_label} ±1 SEM') + ax.set_xlabel('Time (ms)') + ax.set_ylabel('Amplitude (µV)') + ax.grid(True, alpha=0.3) + ax.axhline(y=0, color='black', linestyle='-', alpha=0.3) + ax.axvline(x=0, color='black', linestyle='--', alpha=0.5) + +fig_large, (ax_left_large, ax_right_large) = plt.subplots(1, 2, figsize=(16, 6), 
sharey=True) + +plot_ch(ax_left_large, left_corr_large, 'blue', 'Left Eye', sem=left_sem_large, + data_mean_corr=left_data_large, times_mean_corr=times_mean_corr) +for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax_left_large.axvline(x=ms, color=col, linestyle='--', alpha=0.6, label=lbl) +ax_left_large.set_title(f'[{ref_label}] Large Checks: Left Eye — {PICK_CH}') +handles, lbls = ax_left_large.get_legend_handles_labels() +ax_left_large.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=10, loc='upper right') + +plot_ch(ax_right_large, right_corr_large, 'red', 'Right Eye', sem=right_sem_large, + data_mean_corr=right_data_large, times_mean_corr=times_mean_corr) +for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax_right_large.axvline(x=ms, color=col, linestyle='--', alpha=0.6, label=lbl) +ax_right_large.set_title(f'[{ref_label}] Large Checks: Right Eye — {PICK_CH}') +handles, lbls = ax_right_large.get_legend_handles_labels() +ax_right_large.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=10, loc='upper right') + +fig_large.tight_layout() +plt.show() + +############################################################################### +# ## Oz evoked: small checks +# + +fig_small, (ax_left_small, ax_right_small) = plt.subplots(1, 2, figsize=(16, 6), sharey=True) + +plot_ch(ax_left_small, left_corr_small, 'blue', 'Left Eye', sem=left_sem_small, + data_mean_corr=left_data_small, times_mean_corr=times_mean_corr) +for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax_left_small.axvline(x=ms, color=col, linestyle='--', alpha=0.6, label=lbl) +ax_left_small.set_title(f'[{ref_label}] Small Checks: Left Eye — {PICK_CH}') +handles, lbls = ax_left_small.get_legend_handles_labels() +ax_left_small.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=10, loc='upper right') + +plot_ch(ax_right_small, 
right_corr_small, 'red', 'Right Eye', sem=right_sem_small, + data_mean_corr=right_data_small, times_mean_corr=times_mean_corr) +for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax_right_small.axvline(x=ms, color=col, linestyle='--', alpha=0.6, label=lbl) +ax_right_small.set_title(f'[{ref_label}] Small Checks: Right Eye — {PICK_CH}') +handles, lbls = ax_right_small.get_legend_handles_labels() +ax_right_small.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=10, loc='upper right') + +fig_small.tight_layout() +plt.show() + +############################################################################### +# ## Occipital channel comparison (Oz, O1, O2) — size-averaged per eye +# +# O1/O2 alongside Oz on the same axes. Subject0000 has a larger right occipital +# lobe that crosses the midline. Due to paradoxical lateralization (right V1 +# dipole projects to left scalp), this predicts O1 > O2 amplitude as a +# baseline anatomical effect independent of pathology. Confirming this here +# separates the structural asymmetry from any eye-dependent lesion signal. 
#

# Two-panel figure: left/right eye, each overlaying Oz (solid black) with
# O1/O2 (dashed purple/pink) from the size-averaged, lag-corrected evokeds.
fig_o1o2, axes_o1o2 = plt.subplots(1, 2, figsize=(16, 6), sharey=True)
occ_styles = {
    'Oz': dict(color='black', lw=2.5, ls='-', alpha=1.0),
    'O1': dict(color='#9467bd', lw=1.8, ls='--', alpha=0.85),
    'O2': dict(color='#e377c2', lw=1.8, ls='--', alpha=0.85),
}

_eye_panels = [('Left Eye', evoked_left_corr_avg),
               ('Right Eye', evoked_right_corr_avg)]
for panel_idx, (eye_title, evoked) in enumerate(_eye_panels):
    if evoked is None:
        continue
    ax = axes_o1o2[panel_idx]
    is_first = panel_idx == 0
    time_ms = evoked.times * 1000

    # Plot only the occipital channels actually present in this evoked.
    for ch_name, line_kw in occ_styles.items():
        if ch_name in evoked.ch_names:
            ch_idx = evoked.ch_names.index(ch_name)
            ax.plot(time_ms, evoked.data[ch_idx] * 1e6, label=ch_name, **line_kw)

    # Landmark latencies — labelled once, on the first panel only.
    for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS):
        ax.axvline(ms, color=col, linestyle='--', alpha=0.5,
                   label=lbl if is_first else '')
    ax.set_title(f'[{ref_label}] Occipital channels — {eye_title} (size-averaged)')
    ax.set_xlabel('Time (ms)')
    ax.axhline(0, color='black', alpha=0.3)
    ax.axvline(0, color='black', linestyle='--', alpha=0.5)
    if is_first:
        ax.set_ylabel('Amplitude (µV)')
    ax.grid(True, alpha=0.3)
    # De-duplicate legend entries by label.
    handles, lbls = ax.get_legend_handles_labels()
    dedup = dict(zip(lbls, handles))
    ax.legend(dedup.values(), dedup.keys(), fontsize=10, loc='upper right')

fig_o1o2.tight_layout()
plt.show()

###############################################################################
# ## Inter-ocular difference wave at Oz
#
# The full time course of the L−R contrast. Surfaces morphology effects that
# single-point biomarkers miss: delayed second peaks on the affected eye,
# split / W-shaped P100 from partially demyelinated fibres, broadening of
# the P100. Zero baseline = eyes match; sign convention follows IOLD (positive
# = left-eye delay near the P100 peak).
+# + +fig_diff, (ax_diff_large, ax_diff_small) = plt.subplots(1, 2, figsize=(16, 6), sharey=True) + +for ax, size_label, l_data, r_data in [ + (ax_diff_large, 'Large', left_corr_large, right_corr_large), + (ax_diff_small, 'Small', left_corr_small, right_corr_small), +]: + diff = l_data - r_data + ax.plot(times, l_data, color='blue', alpha=0.25, linewidth=1.2, label='Left Eye') + ax.plot(times, r_data, color='red', alpha=0.25, linewidth=1.2, label='Right Eye') + ax.plot(times, diff, color='#7d3c98', linewidth=2.5, label='L − R difference') + ax.fill_between(times, 0, diff, color='#7d3c98', alpha=0.15) + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(x=ms, color=col, linestyle='--', alpha=0.6, label=lbl) + + ax.set_title(f'[{ref_label}] {size_label} Checks: L − R Difference Wave — {PICK_CH}') + ax.set_xlabel('Time (ms)') + ax.set_ylabel('Amplitude (µV)') + ax.grid(True, alpha=0.3) + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=10, loc='upper right') + +fig_diff.tight_layout() +plt.show() + +############################################################################### +# ## Diagnostic: jitter-correction impact +# +# Compares the P100 sharpness with and without per-trial PC lag correction. 
+# + +evoked_left_uncorr = avg_eyes(ch_epochs, 'left_eye') +evoked_right_uncorr = avg_eyes(ch_epochs, 'right_eye') + +if all(x is not None for x in [evoked_left_corr_avg, evoked_right_corr_avg, + evoked_left_uncorr, evoked_right_uncorr]): + fig_jitter, axes_j = plt.subplots(1, 2, figsize=(16, 6), sharey=True) + + for ax, eye_name, ev_u, ev_c, color in zip( + axes_j, ['Left Eye', 'Right Eye'], + [evoked_left_uncorr, evoked_right_uncorr], + [evoked_left_corr_avg, evoked_right_corr_avg], + ['blue', 'red'] + ): + t_u = ev_u.times * 1000 + t_c = ev_c.times * 1000 + i_oz = ev_u.ch_names.index(PICK_CH) + + ax.plot(t_u, ev_u.data[i_oz] * 1e6, color='gray', linestyle='--', linewidth=2, + label='Uncorrected') + ax.plot(t_c, ev_c.data[i_oz] * 1e6, color=color, linewidth=2, + label='PC-lag corrected') + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(x=ms, color=col, linestyle=':', alpha=0.5, + label=lbl if ax == axes_j[0] else '') + + ax.set_title(f'[{ref_label}] {eye_name}: Jitter Correction Impact — {PICK_CH}') + ax.set_xlabel('Time (ms)') + if ax == axes_j[0]: + ax.set_ylabel('Amplitude (µV)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + loc='upper right') + + fig_jitter.tight_layout() + plt.show() + +############################################################################### +# ## Diagnostic: estimator robustness +# +# Overlays single trials to reveal outlier contamination (e.g., blinks), then +# compares standard mean, median, and 10%-trimmed mean estimators. 
+# + +fig_est, axes_e = plt.subplots(1, 2, figsize=(16, 6), sharey=True) + +for ax, eye_prefix, color in zip(axes_e, ['left_eye', 'right_eye'], ['blue', 'red']): + keys = [k for k in event_id if k.startswith(eye_prefix)] + if not keys: + continue + + ep = ch_epochs_corr[keys].copy().pick([PICK_CH]) + data = ep.get_data() * 1e6 + if data.shape[0] == 0: + continue + + data = data[:, 0, :] + times_e = ep.times * 1000 + + subset = data[:100] if data.shape[0] > 100 else data + for trial in subset: + ax.plot(times_e, trial, color='gray', alpha=0.08, linewidth=0.5) + + ax.plot(times_e, np.mean(data, axis=0), color='orange', linestyle='--', + linewidth=2, label='Standard mean') + ax.plot(times_e, np.median(data, axis=0), color='green', linestyle='-.', + linewidth=2, label='Median') + ax.plot(times_e, trim_mean(data, 0.1, axis=0), color=color, + linewidth=3, label='10% Trimmed mean') + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(x=ms, color=col, linestyle=':', alpha=0.5, + label=lbl if ax == axes_e[0] else '') + + ax.set_title(f'[{ref_label}] {eye_prefix.replace("_", " ").title()}: ' + f'Single Trials & Estimators — {PICK_CH}') + ax.set_xlabel('Time (ms)') + if ax == axes_e[0]: + ax.set_ylabel('Amplitude (µV)') + ax.set_ylim(-30, 30) + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + loc='upper right') + +fig_est.tight_layout() +plt.show() + +############################################################################### +# ## Diagnostic: multi-channel topography +# +# Displays the trimmed-mean waveform across all posterior channels to confirm +# the Oz > O1/O2 > Pz generator gradient expected from a V1 source. 
+# + +fig_mc, axes_mc = plt.subplots(1, 2, figsize=(16, 6), sharey=True) + +plot_channels = [ch for ch in ['Oz', 'O1', 'O2', 'P7', 'P8', 'Pz'] + if ch in ch_epochs_corr.ch_names] +ch_colors = {'Oz': 'black', 'O1': '#9467bd', 'O2': '#e377c2', + 'P7': '#1f77b4', 'P8': '#d62728', 'Pz': '#2ca02c'} + +for ax, eye_name, ev_avg in zip(axes_mc, ['Left Eye', 'Right Eye'], + [evoked_left_corr_avg, evoked_right_corr_avg]): + if ev_avg is None: + continue + + t_mc = ev_avg.times * 1000 + for ch in plot_channels: + idx = ev_avg.ch_names.index(ch) + lw = 3 if ch == 'Oz' else 1.5 + alpha = 1.0 if ch == 'Oz' else 0.7 + ax.plot(t_mc, ev_avg.data[idx] * 1e6, + color=ch_colors.get(ch, 'gray'), linewidth=lw, alpha=alpha, label=ch) + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(x=ms, color=col, linestyle=':', alpha=0.5, + label=lbl if ax == axes_mc[0] else '') + + ax.set_title(f'[{ref_label}] {eye_name}: Multi-Channel Topography') + ax.set_xlabel('Time (ms)') + if ax == axes_mc[0]: + ax.set_ylabel('Amplitude (µV)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + loc='upper right') + +fig_mc.tight_layout() +plt.show() + +# ========================================================================= +# SECTION 1: Pre-chiasmatic / Optic Nerve Biomarkers +# +# BM1–BM5 all measure the L−R optic-nerve contrast at Oz. Because both eyes +# share the same Quest → Cyton signal chain the unmeasured residual lags +# cancel in every L−R difference, making these biomarkers robust to the +# absolute timing uncertainty in this setup. 
+# ========================================================================= +print("\n" + "="*70) +print("SECTION 1 — Pre-chiasmatic / Optic Nerve") +print("="*70) + +############################################################################### +# ## BM1 — IOLD: inter-ocular P100 latency difference (pooled) +# +# The signed L−R difference in P100 latency at Oz (size-averaged evoked). +# > 6–8 ms is the most-cited clinical threshold for unilateral demyelination. +# + +results['iold'] = compute_iold(p100_left, p100_right) + +print("\n--- BM1: IOLD (pooled) ---") +if results['iold'] is None: + print("[BM1] Cannot compute — P100 not detected in one or both eyes") +else: + d = results['iold'] + direction = "left delayed" if d['iold_ms'] > 0 else "right delayed" + status = 'FLAG' if d['flag'] else f'within ±{IOLD_FLAG_MS:.0f} ms' + print(f"[BM1] P100 Left = {d['p100_left_ms']:.2f} ms Right = {d['p100_right_ms']:.2f} ms") + print(f"[BM1] IOLD L−R = {d['iold_ms']:+.2f} ms ({direction}) [{status}]") + + fig_bm1, ax_bm1 = plt.subplots(figsize=(6, 5)) + bar_vals = [d['p100_left_ms'], d['p100_right_ms']] + bars = ax_bm1.bar([0, 1], bar_vals, color=['#1f77b4', '#d62728'], alpha=0.75, width=0.45) + y_top = max(bar_vals) + 5 + ax_bm1.annotate('', xy=(1, y_top), xytext=(0, y_top), + arrowprops=dict(arrowstyle='<->', color='#7d3c98', lw=2)) + flag_marker = ' ⚑ FLAG' if d['flag'] else '' + ax_bm1.text(0.5, y_top + 1.5, f'IOLD = {d["iold_ms"]:+.1f} ms{flag_marker}', + ha='center', va='bottom', color='#7d3c98', fontsize=11, fontweight='bold') + # shade the ±threshold band around right-eye bar to give visual reference + ref = d['p100_right_ms'] + ax_bm1.axhspan(ref - IOLD_FLAG_MS, ref + IOLD_FLAG_MS, + alpha=0.10, color='orange', label=f'±{IOLD_FLAG_MS:.0f} ms threshold band') + ax_bm1.set_xticks([0, 1]) + ax_bm1.set_xticklabels(['Left Eye', 'Right Eye'], fontsize=12) + ax_bm1.set_ylabel('P100 latency at Oz (ms)') + ax_bm1.set_title(f'[{ref_label}] BM1: IOLD — inter-ocular P100 
latency') + ax_bm1.set_ylim(0, y_top + 8) + ax_bm1.legend(fontsize=9) + ax_bm1.grid(axis='y', alpha=0.3) + fig_bm1.tight_layout() + plt.show() + +############################################################################### +# ## BM2 — IOLD per check size +# +# Demyelination preferentially delays high-spatial-frequency (small-check) +# responses, so the per-size IOLD often surfaces lateralised dysfunction that +# the size-pooled IOLD averages out. +# + +results['iold_per_size'] = compute_iold_per_size(ch_epochs_corr, event_id, PICK_CH) + +print("\n--- BM2: IOLD per check size ---") +for size, d in results['iold_per_size'].items(): + if d is None: + print(f"[BM2/{size}] Cannot compute — P100 not detected at one or both eyes") + continue + direction = "left delayed" if d['iold_ms'] > 0 else "right delayed" + status = 'FLAG' if d['flag'] else f'within ±{IOLD_FLAG_MS:.0f} ms' + print(f"[BM2/{size}] P100 Left = {d['p100_left_ms']:.2f} ms Right = {d['p100_right_ms']:.2f} ms") + print(f"[BM2/{size}] IOLD L−R = {d['iold_ms']:+.2f} ms ({direction}) [{status}]") + +# Grouped bar: L/R per check size +sizes_present = [s for s, d in results['iold_per_size'].items() if d is not None] +if sizes_present: + fig_bm2, ax_bm2 = plt.subplots(figsize=(8, 5)) + x2 = np.arange(len(sizes_present)) + w2 = 0.32 + for i, (eye_prefix, color, eye_label) in enumerate( + [('left', '#1f77b4', 'Left Eye'), ('right', '#d62728', 'Right Eye')]): + lats = [results['iold_per_size'][s][f'p100_{eye_prefix}_ms'] for s in sizes_present] + ax_bm2.bar(x2 + (i - 0.5) * w2, lats, w2, color=color, alpha=0.75, label=eye_label) + + for j, size in enumerate(sizes_present): + d = results['iold_per_size'][size] + if d and d.get('iold_ms') is not None: + flag_str = ' ⚑' if d['flag'] else '' + top = max(d['p100_left_ms'], d['p100_right_ms']) + ax_bm2.text(j, top + 1.5, f'IOLD\n{d["iold_ms"]:+.1f}ms{flag_str}', + ha='center', fontsize=9, color='#7d3c98', fontweight='bold') + + ax_bm2.set_xticks(x2) + 
ax_bm2.set_xticklabels([f'{s.title()} checks\n({CHECK_SIZE_ARCMIN[s]:.0f} arcmin)' + for s in sizes_present], fontsize=11) + ax_bm2.set_ylabel('P100 latency at Oz (ms)') + ax_bm2.set_title(f'[{ref_label}] BM2: IOLD per check size') + ax_bm2.legend() + ax_bm2.grid(axis='y', alpha=0.3) + fig_bm2.tight_layout() + plt.show() + +############################################################################### +# ## BM3 — Check-size slope +# +# Per-eye P100 latency slope vs. check size (ms / arcmin). Demyelination +# preferentially delays high-spatial-frequency (small-check) responses, so the +# L−R slope difference amplifies asymmetric demyelination beyond what a +# single-check IOLD captures. +# +# Check-size mapping: +# large → 1.0 deg = 60 arcmin (ISCEV "large check") +# small → 0.25 deg = 15 arcmin (ISCEV "small check") +# + +results['slope'] = compute_check_size_slope( + ch_epochs_corr, event_id, PICK_CH, CHECK_SIZE_ARCMIN, +) +s = results['slope'] + +print("\n--- BM3: Check-size slope ---") +for cond_key, lat_ms in s['per_condition_p100_ms'].items(): + print(f"[BM3] {cond_key}: P100 = {lat_ms:.2f} ms") +if s['slope_left_ms_per_arcmin'] is not None: + print(f"[BM3] Left eye slope: {s['slope_left_ms_per_arcmin']:+.4f} ms/arcmin") +if s['slope_right_ms_per_arcmin'] is not None: + print(f"[BM3] Right eye slope: {s['slope_right_ms_per_arcmin']:+.4f} ms/arcmin") +if s['slope_diff'] is not None: + print(f"[BM3] Slope diff L−R: {s['slope_diff']:+.4f} ms/arcmin " + f"(positive = left more SF-dependent = left more affected)") +elif not s['per_condition_p100_ms']: + print("[BM3] Insufficient per-condition P100 detections for slope estimation") +else: + print("[BM3] Need both eyes detected for slope difference") + +# Scatter + regression lines +if s['per_condition_p100_ms']: + fig_bm3, ax_bm3 = plt.subplots(figsize=(7, 5)) + for eye_prefix, color, eye_label, slope_key in [ + ('left_eye', '#1f77b4', 'Left Eye', 'slope_left_ms_per_arcmin'), + ('right_eye', '#d62728', 'Right Eye', 
'slope_right_ms_per_arcmin'), + ]: + pts = [] + for cond_key, lat_ms in s['per_condition_p100_ms'].items(): + if cond_key.startswith(eye_prefix) and lat_ms is not None: + size_label = cond_key.split('/')[1] + if size_label in CHECK_SIZE_ARCMIN: + pts.append((CHECK_SIZE_ARCMIN[size_label], lat_ms)) + if not pts: + continue + pts.sort() + xs_p, ys_p = zip(*pts) + ax_bm3.scatter(xs_p, ys_p, color=color, s=100, zorder=5) + + slope_val = s.get(slope_key) + if slope_val is not None and len(pts) >= 2: + intercept = np.mean(ys_p) - slope_val * np.mean(xs_p) + x_fit = np.linspace(min(xs_p) - 5, max(xs_p) + 5, 80) + ax_bm3.plot(x_fit, slope_val * x_fit + intercept, color=color, lw=2, + label=f'{eye_label}: {slope_val:+.3f} ms/arcmin') + else: + ax_bm3.plot(xs_p, ys_p, color=color, lw=1.5, label=eye_label) + + if s['slope_diff'] is not None: + ax_bm3.annotate(f'Slope diff L−R = {s["slope_diff"]:+.3f} ms/arcmin', + xy=(0.05, 0.95), xycoords='axes fraction', + ha='left', va='top', fontsize=10, color='#7d3c98', + fontweight='bold') + ax_bm3.set_xlabel('Check size (arcmin)') + ax_bm3.set_ylabel('P100 latency at Oz (ms)') + ax_bm3.set_title(f'[{ref_label}] BM3: P100 latency vs check size (slope)') + ax_bm3.legend() + ax_bm3.grid(True, alpha=0.3) + fig_bm3.tight_layout() + plt.show() + +############################################################################### +# ## BM4 — P100 amplitude ratio L/R +# +# Inter-ocular amplitude ratio at P100. Less specific than latency but +# computed for free from the same recordings. |log2(L/R)| > 1 (ratio outside +# ~0.5–2.0) flags attenuated drive on the lower-amplitude side. +# +# Caveat: amplitude is sensitive to electrode contact / pulse artifact / +# subject alertness. Treat amplitude ratios as supportive evidence rather than +# standalone biomarkers until several clean baseline sessions bracket the +# day-to-day variance. 
+# + +results['amplitude'] = compute_amplitude_ratio(p100_left, p100_right) + +print("\n--- BM4: Amplitude ratio L/R ---") +if results['amplitude'] is None: + if p100_left is None or p100_right is None: + print("[BM4] Cannot compute — P100 not detected in one or both eyes") + else: + print("[BM4] Right-eye P100 amplitude is zero; ratio undefined") +else: + a = results['amplitude'] + status = 'FLAG' if a['flag'] else 'within ±1 log₂' + print(f"[BM4] P100 amplitude Left = {a['amp_left_uv']:.2f} µV") + print(f"[BM4] P100 amplitude Right = {a['amp_right_uv']:.2f} µV") + print(f"[BM4] L/R ratio = {a['ratio']:.2f} (log₂ = {a['log2_ratio']:+.2f}) [{status}]") + + fig_bm4, ax_bm4 = plt.subplots(figsize=(6, 5)) + ax_bm4.bar([0, 1], [a['amp_left_uv'], a['amp_right_uv']], + color=['#1f77b4', '#d62728'], alpha=0.75, width=0.45) + ax_bm4.set_xticks([0, 1]) + ax_bm4.set_xticklabels(['Left Eye', 'Right Eye'], fontsize=12) + ax_bm4.set_ylabel('P100 amplitude at Oz (µV)') + flag_marker = ' ⚑ FLAG' if a['flag'] else '' + ax_bm4.set_title(f'[{ref_label}] BM4: P100 amplitude L/R\n' + f'ratio = {a["ratio"]:.2f} (log₂ = {a["log2_ratio"]:+.2f}){flag_marker}') + ax_bm4.grid(axis='y', alpha=0.3) + fig_bm4.tight_layout() + plt.show() + +############################################################################### +# ## BM5 — Bootstrap P100 / IOLD confidence intervals +# +# The 8 ms IOLD threshold is only meaningful relative to the precision of +# the L and R latency estimates. A 7 ms IOLD with ±2 ms CI is clinically +# suspicious; a 7 ms IOLD with ±5 ms CI is noise. +# +# Trial-resamples with replacement, recomputes the trimmed-mean evoked at +# PICK_CH, locates the positive peak in the P100 search window. The IOLD CI +# uses pairwise differences of two independent bootstrap samples. 
+# + +N_BOOT = 1000 +BOOT_SEED = 0 + +boot_left = bootstrap_p100_latency( + ch_epochs_corr, event_id, PICK_CH, 'left_eye', + win_ms=P100_WIN_MS, n_boot=N_BOOT, seed=BOOT_SEED, +) +boot_right = bootstrap_p100_latency( + ch_epochs_corr, event_id, PICK_CH, 'right_eye', + win_ms=P100_WIN_MS, n_boot=N_BOOT, seed=BOOT_SEED + 1, +) + +print("\n--- BM5: Bootstrap P100 / IOLD confidence intervals ---") + +if boot_left is not None and boot_right is not None: + l_med, l_lo, l_hi = (np.percentile(boot_left, 50), + np.percentile(boot_left, 2.5), + np.percentile(boot_left, 97.5)) + r_med, r_lo, r_hi = (np.percentile(boot_right, 50), + np.percentile(boot_right, 2.5), + np.percentile(boot_right, 97.5)) + diffs = boot_left - boot_right + d_med, d_lo, d_hi = (np.percentile(diffs, 50), + np.percentile(diffs, 2.5), + np.percentile(diffs, 97.5)) + excludes_zero = (d_lo > 0) or (d_hi < 0) + excl_8ms = (d_lo > 8.0) or (d_hi < -8.0) + print(f"[BM5] Left P100 median = {l_med:.2f} ms 95% CI = [{l_lo:.2f}, {l_hi:.2f}] " + f"(±{(l_hi-l_lo)/2:.2f} ms)") + print(f"[BM5] Right P100 median = {r_med:.2f} ms 95% CI = [{r_lo:.2f}, {r_hi:.2f}] " + f"(±{(r_hi-r_lo)/2:.2f} ms)") + print(f"[BM5] IOLD (L−R) median = {d_med:+.2f} ms 95% CI = [{d_lo:+.2f}, {d_hi:+.2f}]") + print(f"[BM5] {'CI excludes 0 — significant' if excludes_zero else 'CI includes 0 — not separable'}") + print(f"[BM5] {'CI excludes ±8 ms — clinically meaningful' if excl_8ms else 'CI overlaps ±8 ms — borderline'}") + + results['bootstrap'] = { + 'n_boot': int(N_BOOT), + 'win_ms': list(P100_WIN_MS), + 'left_p100_median_ms': _f(l_med), + 'left_p100_ci_lo_ms': _f(l_lo), + 'left_p100_ci_hi_ms': _f(l_hi), + 'right_p100_median_ms': _f(r_med), + 'right_p100_ci_lo_ms': _f(r_lo), + 'right_p100_ci_hi_ms': _f(r_hi), + 'iold_median_ms': _f(d_med), + 'iold_ci_lo_ms': _f(d_lo), + 'iold_ci_hi_ms': _f(d_hi), + 'iold_excludes_zero': bool(excludes_zero), + 'iold_excludes_8ms': bool(excl_8ms), + } + + # Bootstrap latency distribution + fig_bm5, ax_bm5 = 
plt.subplots(figsize=(10, 5)) + bins5 = np.arange(P100_WIN_MS[0], P100_WIN_MS[1] + 4, 4) + ax_bm5.hist(boot_left, bins=bins5, alpha=0.5, color='#1f77b4', + label=f'Left ({l_med:.1f} ms, 95% CI [{l_lo:.1f}, {l_hi:.1f}])') + ax_bm5.hist(boot_right, bins=bins5, alpha=0.5, color='#d62728', + label=f'Right ({r_med:.1f} ms, 95% CI [{r_lo:.1f}, {r_hi:.1f}])') + ax_bm5.axvline(l_med, color='#1f77b4', linestyle='--', alpha=0.8) + ax_bm5.axvline(r_med, color='#d62728', linestyle='--', alpha=0.8) + ax_bm5.axvspan(l_lo, l_hi, alpha=0.10, color='#1f77b4') + ax_bm5.axvspan(r_lo, r_hi, alpha=0.10, color='#d62728') + flag_str5 = ' ⚑ CI excludes 0' if excludes_zero else ' CI includes 0' + ax_bm5.set_xlabel('P100 latency (ms)') + ax_bm5.set_ylabel(f'Bootstrap count (N={N_BOOT})') + ax_bm5.set_title(f'[{ref_label}] BM5: Bootstrap P100 distribution — {PICK_CH}\n' + f'IOLD = {d_med:+.2f} ms 95% CI [{d_lo:+.2f}, {d_hi:+.2f}]{flag_str5}') + ax_bm5.legend(loc='upper right') + ax_bm5.grid(True, alpha=0.3) + fig_bm5.tight_layout() + plt.show() +else: + print("[BM5] Insufficient trials in one or both eyes — skipping") + results['bootstrap'] = None + +# ========================================================================= +# SECTION 2: Morphological Indicators +# +# BM6 examines the shape of the P100 itself rather than its latency or +# amplitude, targeting waveform distortions that can accompany partial +# demyelination or multifocal lesions. +# ========================================================================= +print("\n" + "="*70) +print("SECTION 2 — Morphological") +print("="*70) + +############################################################################### +# ## BM6 — W-peak (bifurcated P100) +# +# Partial demyelination or multifocal lesions can split the P100 into two +# distinct peaks (a "W" shape), reflecting asynchronous arrival of fast and +# slow fibre populations. 
The search window is 80–130 ms; two peaks are flagged +# when each rises > 1 µV above the dip between them. +# + +print("\n--- BM6: W-peak (bifurcated P100) ---") + +wpeak_results = {} +for eye_name, ev_data in [('Left Eye', evoked_left_corr_avg), + ('Right Eye', evoked_right_corr_avg)]: + if ev_data is None: + continue + oz_idx = ev_data.ch_names.index(PICK_CH) + t_ms = ev_data.times * 1000 + oz_uv = ev_data.data[oz_idx] * 1e6 + + w_mask = (t_ms >= 80) & (t_ms <= 130) + oz_win = oz_uv[w_mask] + t_win = t_ms[w_mask] + peaks_w, _ = find_peaks(oz_win, prominence=0.5, distance=3) + + flagged = False + if len(peaks_w) >= 2: + p1_t, p2_t = t_win[peaks_w[0]], t_win[peaks_w[1]] + p1_v, p2_v = oz_win[peaks_w[0]], oz_win[peaks_w[1]] + dip_v = float(np.min(oz_win[peaks_w[0]:peaks_w[1] + 1])) + if p1_v - dip_v > 1.0 and p2_v - dip_v > 1.0: + flagged = True + print(f"[BM6 {eye_name}] FLAG: W-peak detected on {PICK_CH}") + print(f" Peak 1: {p1_v:.2f} µV at {p1_t:.1f} ms") + print(f" Peak 2: {p2_v:.2f} µV at {p2_t:.1f} ms") + print(f" Dip: {dip_v:.2f} µV (depths: P1−dip={p1_v-dip_v:.2f}, P2−dip={p2_v-dip_v:.2f})") + wpeak_results[eye_name] = {'flagged': True, 'p1_ms': _f(p1_t), 'p2_ms': _f(p2_t), + 'p1_uv': _f(p1_v), 'p2_uv': _f(p2_v), 'dip_uv': _f(dip_v)} + if not flagged: + print(f"[BM6 {eye_name}] Normal single P100 morphology") + wpeak_results[eye_name] = {'flagged': False} + +results['wpeak'] = wpeak_results + +# Waveform plot with peak annotations +fig_bm6, axes_bm6 = plt.subplots(1, 2, figsize=(14, 5), sharey=True) +for ax, (eye_name, ev_data) in zip(axes_bm6, + [('Left Eye', evoked_left_corr_avg), + ('Right Eye', evoked_right_corr_avg)]): + if ev_data is None: + continue + oz_idx = ev_data.ch_names.index(PICK_CH) + t_ms = ev_data.times * 1000 + oz_uv = ev_data.data[oz_idx] * 1e6 + + ax.plot(t_ms, oz_uv, color='black', lw=2, label=PICK_CH) + ax.axvspan(80, 130, alpha=0.07, color='green', label='W-peak search (80–130 ms)') + + # Annotate detected peaks in search window + 
w_mask = (t_ms >= 80) & (t_ms <= 130) + oz_win = oz_uv[w_mask] + t_win = t_ms[w_mask] + peaks_w, _ = find_peaks(oz_win, prominence=0.5, distance=3) + for pi, pk in enumerate(peaks_w[:3]): + ax.annotate(f'P{pi+1}: {oz_win[pk]:.1f} µV\n@ {t_win[pk]:.0f} ms', + xy=(t_win[pk], oz_win[pk]), + xytext=(t_win[pk] + 8, oz_win[pk] + 1.5), + arrowprops=dict(arrowstyle='->', color='green', lw=1.2), + fontsize=8, color='green') + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(ms, color=col, linestyle='--', alpha=0.5, label=lbl) + + wres = wpeak_results.get(eye_name, {}) + flag_title = ' ⚑ W-PEAK FLAGGED' if wres.get('flagged') else '' + ax.set_title(f'[{ref_label}] BM6: W-peak — {eye_name}{flag_title}') + ax.set_xlabel('Time (ms)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + if ax == axes_bm6[0]: + ax.set_ylabel('Amplitude (µV)') + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=9, loc='upper right') + +fig_bm6.tight_layout() +plt.show() + +# ========================================================================= +# SECTION 3: Post-chiasmatic / Cortical Biomarkers +# +# BM7–BM12 examine how the VEP distributes across scalp channels, targeting +# post-chiasmatic asymmetries and generator-confirmation checks. +# +# Hemisphere note: due to V1 anatomy (calcarine cortex folded into the +# longitudinal fissure), the P100 dipole projects PARADOXICALLY — right +# hemisphere V1 → left scalp (O1), left hemisphere V1 → right scalp (O2). +# P7/P8 (lateral extrastriate) project more directly ipsilaterally and are +# therefore less ambiguous for cortical-side lateralization. 
+# ========================================================================= +print("\n" + "="*70) +print("SECTION 3 — Post-chiasmatic / Cortical") +print("="*70) + +############################################################################### +# ## BM7 — Hemispheric asymmetry (O1 vs O2) +# +# Compares P100 latency and amplitude between left (O1) and right (O2) +# occipital channels. A post-chiasmatic lesion typically produces a "crossed +# asymmetry": the P100 is delayed or attenuated over the ipsilateral scalp +# (paradoxical lateralization) regardless of which eye is stimulated. +# +# **Paradoxical lateralization**: deficit at O1 → RIGHT hemisphere lesion; +# deficit at O2 → LEFT hemisphere lesion. +# + +print("\n--- BM7: Hemispheric asymmetry O1/O2 ---") + +fig_bm7, axes_bm7 = plt.subplots(1, 2, figsize=(16, 6), sharey=True) +hemi_per_eye = {} + +for ax, (eye_name, ev_avg) in zip(axes_bm7, + [('Left Eye', evoked_left_corr_avg), + ('Right Eye', evoked_right_corr_avg)]): + if ev_avg is None: + continue + + h = compute_hemi_asymmetry(ev_avg, 'O1', 'O2') + if h is not None: + lat_status = 'FLAG' if h['lat_flag'] else f'within ±{IOLD_FLAG_MS:.0f} ms' + amp_status = 'FLAG' if h['amp_flag'] else 'within ±1 log₂' + print(f"[BM7 {eye_name}] Latency: O1 = {h['lat_o1']:.2f} ms, O2 = {h['lat_o2']:.2f} ms " + f"(O1−O2 = {h['lat_diff_ms']:+.2f} ms [{lat_status}])") + print(f"[BM7 {eye_name}] Amplitude: O1 = {h['amp_o1']:.2f} µV, O2 = {h['amp_o2']:.2f} µV " + f"(O1/O2 = {h['amp_ratio']:.2f} [{amp_status}])") + hemi_per_eye[eye_name] = h + else: + ev_o1 = ev_avg.copy().pick(['O1']) + ev_o2 = ev_avg.copy().pick(['O2']) + _, p100_o1, _ = get_pr_vep_latencies(ev_o1) + _, p100_o2, _ = get_pr_vep_latencies(ev_o2) + if p100_o1 is not None or p100_o2 is not None: + detected = 'O1' if p100_o1 is not None else 'O2' + missing = 'O2' if p100_o1 is not None else 'O1' + print(f"[BM7 {eye_name}] FLAG: P100 at {detected} but absent at {missing}") + else: + print(f"[BM7 {eye_name}] P100 not 
detected in either O1 or O2") + + # Waveform + t_h = ev_avg.times * 1000 + for ch, color in [('O1', '#9467bd'), ('O2', '#e377c2')]: + if ch in ev_avg.ch_names: + idx = ev_avg.ch_names.index(ch) + ax.plot(t_h, ev_avg.data[idx] * 1e6, color=color, lw=2, label=ch) + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(ms, color=col, linestyle='--', alpha=0.6, + label=lbl if ax == axes_bm7[0] else '') + ax.set_title(f'[{ref_label}] BM7: Hemispheric asymmetry — {eye_name}') + ax.set_xlabel('Time (ms)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + if ax == axes_bm7[0]: + ax.set_ylabel('Amplitude (µV)') + ax.legend(loc='upper right') + +fig_bm7.tight_layout() +plt.show() + +results['hemi_o1o2'] = dict(hemi_per_eye) + +############################################################################### +# ## BM8 — Inter-ocular Δ-asymmetry (O1 vs O2) +# +# Per-eye O1/O2 differences conflate the lesion signal with stable anatomical +# and electrode-stationary asymmetries (skull thickness, calcarine fold, etc.). +# Subtracting one eye's asymmetry from the other's cancels those stationary +# contributions: +# +# Δlat = (O1−O2)|left_eye − (O1−O2)|right_eye +# Δlog₂ = log₂(O1/O2)|left_eye − log₂(O1/O2)|right_eye +# +# ≈ 0 → purely anatomical / electrode-stationary. +# Large value → eye-dependent skew: the asymmetry depends on which eye drives +# cortex, implying a pathway signal rather than a scalp constant. 
+# + +results['hemi_delta_o1o2'] = compute_hemi_delta_asymmetry( + hemi_per_eye.get('Left Eye'), hemi_per_eye.get('Right Eye'), 'O1', 'O2', +) + +print("\n--- BM8: Inter-ocular Δ-asymmetry O1/O2 ---") +if results['hemi_delta_o1o2'] is None: + print("[BM8] Need P100 at both O1 and O2 in both eyes — skipping") +else: + d8 = results['hemi_delta_o1o2'] + print(f"[BM8] (O1−O2) latency: L-eye = {d8['lat_asym_left']:+.2f} ms, " + f"R-eye = {d8['lat_asym_right']:+.2f} ms") + print(f"[BM8] Δlat = {d8['d_lat']:+.2f} ms " + f"(≈0 ⇒ anatomical; large ⇒ eye-dependent)") + print(f"[BM8] log₂(O1/O2): L-eye = {d8['log2_asym_left']:+.2f}, " + f"R-eye = {d8['log2_asym_right']:+.2f}") + print(f"[BM8] Δlog₂ = {d8['d_log2']:+.2f} " + f"(≈0 ⇒ anatomical; large ⇒ eye-dependent)") + + fig_bm8, axes_bm8 = plt.subplots(1, 2, figsize=(10, 5)) + + # Latency asymmetry per eye drive + ax = axes_bm8[0] + lat_vals = [d8['lat_asym_left'] or 0, d8['lat_asym_right'] or 0] + ax.bar(['Left Eye\ndrive', 'Right Eye\ndrive'], lat_vals, + color=['#1f77b4', '#d62728'], alpha=0.75) + ax.axhline(0, color='black', lw=1) + ax.set_ylabel('O1 − O2 latency asymmetry (ms)') + ax.set_title(f'Δlat = {d8["d_lat"]:+.2f} ms') + ax.grid(axis='y', alpha=0.3) + + # Amplitude asymmetry per eye drive + ax = axes_bm8[1] + log2_vals = [d8['log2_asym_left'] or 0, d8['log2_asym_right'] or 0] + ax.bar(['Left Eye\ndrive', 'Right Eye\ndrive'], log2_vals, + color=['#1f77b4', '#d62728'], alpha=0.75) + ax.axhline(0, color='black', lw=1) + ax.set_ylabel('log₂(O1/O2) amplitude asymmetry') + ax.set_title(f'Δlog₂ = {d8["d_log2"]:+.2f}') + ax.grid(axis='y', alpha=0.3) + + fig_bm8.suptitle(f'[{ref_label}] BM8: Inter-ocular Δ-asymmetry O1/O2\n' + '≈0 bars of equal height → stationary anatomy; ' + 'unequal → eye-dependent (pathway signal)', + fontsize=10) + fig_bm8.tight_layout() + plt.show() + +############################################################################### +# ## BM9 — Lateral extrastriate P100 (P7/P8) + Oz→lateral 
propagation +# +# P7/P8 pick up a P100 from lateral extrastriate cortex (V2/V3/MT), typically +# delayed 5–15 ms after Oz (intracortical V1→extrastriate propagation) and +# lower amplitude. Two reads: +# +# - P100 detection at P7/P8 confirms an extrastriate response exists at all. +# - Oz→lateral propagation latency should be a small positive number (0–25 ms). +# Abnormally long or reversed sign suggests intracortical / extrastriate +# conduction issues distinct from optic-nerve demyelination. +# + +print("\n--- BM9: Lateral extrastriate P7/P8 + Oz→lateral propagation ---") + +lateral_per_eye = {} +results['lateral_p7p8'] = {} + +for eye_name, ev_avg, oz_p100 in [ + ('Left Eye', evoked_left_corr_avg, p100_left), + ('Right Eye', evoked_right_corr_avg, p100_right), +]: + if ev_avg is None: + continue + per_eye = {'oz': oz_p100, 'p7': None, 'p8': None} + per_eye_results = {} + for ch in LATERAL_CHANNELS: + if ch not in ev_avg.ch_names: + continue + _, p100_lat, _ = get_pr_vep_latencies(ev_avg.copy().pick([ch])) + per_eye[ch.lower()] = p100_lat + if p100_lat is None: + print(f"[BM9 {eye_name}] {ch}: P100 not detected") + per_eye_results[ch] = None + continue + lat_ms_v = p100_lat['latency'] * 1000.0 + amp_uv_v = p100_lat['amplitude'] * 1e6 + print(f"[BM9 {eye_name}] {ch}: P100 = {amp_uv_v:+.2f} µV at {lat_ms_v:.2f} ms") + ch_entry = {'lat_ms': _f(lat_ms_v), 'amp_uv': _f(amp_uv_v), 'propagation_ms': None} + if oz_p100 is not None: + prop_ms = lat_ms_v - oz_p100['latency'] * 1000.0 + in_range = -2 <= prop_ms <= 25 + status = 'normal' if in_range else 'OUT OF RANGE' + print(f"[BM9 {eye_name}] {ch}−Oz propagation: {prop_ms:+.2f} ms " + f"[{status}, expected −2 to +25 ms]") + ch_entry['propagation_ms'] = _f(prop_ms) + ch_entry['propagation_in_range'] = bool(in_range) + per_eye_results[ch] = ch_entry + lateral_per_eye[eye_name] = per_eye + results['lateral_p7p8'][eye_name] = per_eye_results + +# Waveform: Oz, P7, P8 per eye +fig_bm9, axes_bm9 = plt.subplots(1, 2, 
figsize=(14, 5), sharey=True) +lat_ch_colors = {'Oz': 'black', 'P7': '#1f77b4', 'P8': '#d62728'} +lat_ch_lw = {'Oz': 2.5, 'P7': 1.8, 'P8': 1.8} + +for ax, (eye_name, ev_avg) in zip(axes_bm9, + [('Left Eye', evoked_left_corr_avg), + ('Right Eye', evoked_right_corr_avg)]): + if ev_avg is None: + continue + t_lat = ev_avg.times * 1000 + for ch in ['Oz', 'P7', 'P8']: + if ch not in ev_avg.ch_names: + continue + idx = ev_avg.ch_names.index(ch) + ax.plot(t_lat, ev_avg.data[idx] * 1e6, + color=lat_ch_colors[ch], lw=lat_ch_lw[ch], label=ch) + + # Annotate propagation delays + eye_lat = results['lateral_p7p8'].get(eye_name, {}) + for ch in ['P7', 'P8']: + ch_entry = eye_lat.get(ch) + if ch_entry and ch_entry.get('propagation_ms') is not None: + lat_ms_v = ch_entry['lat_ms'] + prop_ms = ch_entry['propagation_ms'] + ok_str = '✓' if ch_entry.get('propagation_in_range', True) else '⚑' + # find the y value at this latency for annotation placement + t_idx_ann = int(np.argmin(np.abs(t_lat - lat_ms_v))) + y_ann = ev_avg.data[ev_avg.ch_names.index(ch), t_idx_ann] * 1e6 + ax.annotate(f'{ch}: +{prop_ms:.0f}ms {ok_str}', + xy=(lat_ms_v, y_ann), + xytext=(lat_ms_v + 15, y_ann + 1.5), + arrowprops=dict(arrowstyle='->', color=lat_ch_colors[ch], lw=1), + fontsize=8, color=lat_ch_colors[ch]) + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(ms, color=col, linestyle='--', alpha=0.5, + label=lbl if ax == axes_bm9[0] else '') + ax.set_title(f'[{ref_label}] BM9: Lateral extrastriate — {eye_name}') + ax.set_xlabel('Time (ms)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + if ax == axes_bm9[0]: + ax.set_ylabel('Amplitude (µV)') + handles, lbls = ax.get_legend_handles_labels() + ax.legend(dict(zip(lbls, handles)).values(), dict(zip(lbls, handles)).keys(), + fontsize=9, loc='upper right') + +fig_bm9.tight_layout() +plt.show() + 
+############################################################################### +# ## BM10 — Lateral extrastriate asymmetry (P7 vs P8) + inter-ocular contrast +# +# Same logic as BM7/BM8 but on P7/P8. Critical advantage: extrastriate +# generators sit on the lateral cortical surface, so paradoxical lateralization +# is much weaker here than at O1/O2. P7/P8 hemispheric asymmetry is therefore +# a more direct readout of cortical-side asymmetry — useful for detecting +# unilateral lateral-occipital / parieto-occipital involvement. +# + +print("\n--- BM10: Lateral asymmetry P7/P8 + inter-ocular contrast ---") + +p7p8_asym_per_eye = {} +for eye_name, ev_avg in [('Left Eye', evoked_left_corr_avg), + ('Right Eye', evoked_right_corr_avg)]: + if ev_avg is None: + continue + h = compute_hemi_asymmetry(ev_avg, 'P7', 'P8') + if h is None: + print(f"[BM10 {eye_name}] P100 not detected at both P7 and P8 — skipping") + continue + lat_status = 'FLAG' if h['lat_flag'] else f'within ±{IOLD_FLAG_MS:.0f} ms' + amp_status = 'FLAG' if h['amp_flag'] else 'within ±1 log₂' + print(f"[BM10 {eye_name}] P7 = {h['lat_p7']:.2f} ms, P8 = {h['lat_p8']:.2f} ms " + f"(P7−P8 = {h['lat_diff_ms']:+.2f} ms [{lat_status}])") + print(f"[BM10 {eye_name}] P7 = {h['amp_p7']:.2f} µV, P8 = {h['amp_p8']:.2f} µV " + f"(P7/P8 = {h['amp_ratio']:.2f} [{amp_status}])") + p7p8_asym_per_eye[eye_name] = h + +results['hemi_p7p8'] = dict(p7p8_asym_per_eye) +results['hemi_delta_p7p8'] = compute_hemi_delta_asymmetry( + p7p8_asym_per_eye.get('Left Eye'), p7p8_asym_per_eye.get('Right Eye'), 'P7', 'P8', +) + +if results['hemi_delta_p7p8'] is not None: + d10 = results['hemi_delta_p7p8'] + print(f"[BM10] Δlat (P7−P8): {d10['d_lat']:+.2f} ms " + f"(≈0 ⇒ stationary; large ⇒ eye-dependent)") + print(f"[BM10] Δlog₂(P7/P8): {d10['d_log2']:+.2f} " + f"(≈0 ⇒ stationary; large ⇒ eye-dependent)") + + fig_bm10, axes_bm10 = plt.subplots(1, 2, figsize=(10, 5)) + + ax = axes_bm10[0] + ax.bar(['Left Eye\ndrive', 'Right Eye\ndrive'], + 
[d10['lat_asym_left'] or 0, d10['lat_asym_right'] or 0], + color=['#1f77b4', '#d62728'], alpha=0.75) + ax.axhline(0, color='black', lw=1) + ax.set_ylabel('P7 − P8 latency asymmetry (ms)') + ax.set_title(f'Δlat = {d10["d_lat"]:+.2f} ms') + ax.grid(axis='y', alpha=0.3) + + ax = axes_bm10[1] + ax.bar(['Left Eye\ndrive', 'Right Eye\ndrive'], + [d10['log2_asym_left'] or 0, d10['log2_asym_right'] or 0], + color=['#1f77b4', '#d62728'], alpha=0.75) + ax.axhline(0, color='black', lw=1) + ax.set_ylabel('log₂(P7/P8) amplitude asymmetry') + ax.set_title(f'Δlog₂ = {d10["d_log2"]:+.2f}') + ax.grid(axis='y', alpha=0.3) + + fig_bm10.suptitle(f'[{ref_label}] BM10: Inter-ocular Δ-asymmetry P7/P8\n' + '≈0 → stationary anatomy; unequal → eye-dependent (cortical-side signal)', + fontsize=10) + fig_bm10.tight_layout() + plt.show() + +############################################################################### +# ## BM11 — Combined lateral hemisphere composites +# +# Per-hemisphere composites: L-hemi = (O1+P7)/2, R-hemi = (O2+P8)/2. +# Three advantages over O1/O2 alone: +# - SNR ≈ √2 better (averaging two channels per hemisphere). +# - Dilutes the contribution of any single bad electrode contact. +# - Mixes the paradoxically-projected V1 component (O1/O2) with the +# directly-projected extrastriate component (P7/P8), giving a composite +# that is less ambiguous about cortical side than O1/O2 alone. 
+# + +print("\n--- BM11: Combined lateral hemisphere composites ---") + +fig_bm11, axes_bm11 = plt.subplots(1, 2, figsize=(16, 6), sharey=True) +composite_p100 = {} + +for ax, (eye_name, ev_avg, color) in zip(axes_bm11, [ + ('Left Eye', evoked_left_corr_avg, 'blue'), + ('Right Eye', evoked_right_corr_avg, 'red'), +]): + if ev_avg is None: + continue + if not all(ch in ev_avg.ch_names for ch in ('O1', 'O2', 'P7', 'P8')): + continue + idx_o1 = ev_avg.ch_names.index('O1') + idx_o2 = ev_avg.ch_names.index('O2') + idx_p7 = ev_avg.ch_names.index('P7') + idx_p8 = ev_avg.ch_names.index('P8') + + l_hemi = (ev_avg.data[idx_o1] + ev_avg.data[idx_p7]) / 2.0 + r_hemi = (ev_avg.data[idx_o2] + ev_avg.data[idx_p8]) / 2.0 + t_comp = ev_avg.times * 1000 + + ax.plot(t_comp, l_hemi * 1e6, color='#1f77b4', lw=2, + label='L-hemi (O1+P7)/2') + ax.plot(t_comp, r_hemi * 1e6, color='#d62728', lw=2, + label='R-hemi (O2+P8)/2') + + win_mask = (t_comp >= P100_WIN_MS[0]) & (t_comp <= P100_WIN_MS[1]) + win_t = t_comp[win_mask] + l_lat = win_t[np.argmax(l_hemi[win_mask])] + r_lat = win_t[np.argmax(r_hemi[win_mask])] + l_amp = float(l_hemi[win_mask].max() * 1e6) + r_amp = float(r_hemi[win_mask].max() * 1e6) + composite_p100[eye_name] = {'l_lat': l_lat, 'r_lat': r_lat, + 'l_amp': l_amp, 'r_amp': r_amp} + print(f"[BM11 {eye_name}] L-hemi P100 = {l_amp:+.2f} µV @ {l_lat:.1f} ms | " + f"R-hemi P100 = {r_amp:+.2f} µV @ {r_lat:.1f} ms") + + for ms, col, lbl in zip(LANDMARK_MS, LANDMARK_COLORS, LANDMARK_LABELS): + ax.axvline(ms, color=col, linestyle='--', alpha=0.6, + label=lbl if ax == axes_bm11[0] else '') + ax.set_title(f'[{ref_label}] BM11: Composite hemispheres — {eye_name}') + ax.set_xlabel('Time (ms)') + if ax == axes_bm11[0]: + ax.set_ylabel('Amplitude (µV)') + ax.axhline(0, color='black', alpha=0.3) + ax.axvline(0, color='black', linestyle='--', alpha=0.5) + ax.legend(loc='upper right') + +fig_bm11.tight_layout() +plt.show() + +results['composite'] = {eye: {k: _f(v) for k, v in d.items()} + for 
eye, d in composite_p100.items()} +results['composite_delta'] = None +if 'Left Eye' in composite_p100 and 'Right Eye' in composite_p100: + Lc = composite_p100['Left Eye'] + Rc = composite_p100['Right Eye'] + d_lat_comp = (Lc['l_lat'] - Lc['r_lat']) - (Rc['l_lat'] - Rc['r_lat']) + log2_L = np.log2(Lc['l_amp'] / Lc['r_amp']) if Lc['r_amp'] > 0 else float('nan') + log2_R = np.log2(Rc['l_amp'] / Rc['r_amp']) if Rc['r_amp'] > 0 else float('nan') + d_log2_comp = log2_L - log2_R + print(f"[BM11] Composite Δlat = {d_lat_comp:+.2f} ms (eye-dependent hemispheric latency skew)") + print(f"[BM11] Composite Δlog₂ = {d_log2_comp:+.2f} (eye-dependent hemispheric amplitude skew)") + results['composite_delta'] = {'d_lat': _f(d_lat_comp), 'd_log2': _f(d_log2_comp)} + +############################################################################### +# ## BM12 — Topology QC: Pz gradient + Fz Halliday polarity inversion +# +# Halliday's frontal polarity-inversion check: a genuine V1-generated P100 +# produces a *negative* deflection at Fz at the same latency, because the +# posterior-pointing dipole projects with inverted polarity to the frontal +# scalp. This is a strong generator-confirmation that artifact, EMG, or +# alpha contamination cannot mimic. Pz should additionally show a smaller +# positive P100 (gradient: Oz > Pz, Fz < 0). +# +# Only meaningful under linked-mastoid: Fz is zero by construction under +# the Oz−Fz reference scheme. 
+# + +results['topology'] = None +print("\n--- BM12: Topology QC (Pz gradient + Fz Halliday inversion) ---") + +topology_results = {} +for eye_name, ev_avg, oz_p100 in [ + ('Left Eye', evoked_left_corr_avg, p100_left), + ('Right Eye', evoked_right_corr_avg, p100_right), +]: + if ev_avg is None or oz_p100 is None: + print(f"[BM12 {eye_name}] Oz P100 not detected — skipping") + continue + + oz_lat_s = oz_p100['latency'] + t_idx = int(round((oz_lat_s - ev_avg.tmin) * ev_avg.info['sfreq'])) + t_idx = max(0, min(t_idx, ev_avg.data.shape[1] - 1)) + + oz_val = ev_avg.data[ev_avg.ch_names.index('Oz'), t_idx] * 1e6 + pz_val = ev_avg.data[ev_avg.ch_names.index('Pz'), t_idx] * 1e6 if 'Pz' in ev_avg.ch_names else None + fz_val = ev_avg.data[ev_avg.ch_names.index('Fz'), t_idx] * 1e6 if 'Fz' in ev_avg.ch_names else 0.0 + + if 'M2' in ev_avg.ch_names: + m2_val = ev_avg.data[ev_avg.ch_names.index('M2'), t_idx] * 1e6 + oz_val -= m2_val + if pz_val is not None: + pz_val -= m2_val + fz_val -= m2_val + ref_note = "re-referenced to M2" + else: + ref_note = "using current ref" + + entry = {'oz_amp_uv': _f(oz_val), 'pz_amp_uv': None, 'fz_amp_uv': None, + 'pz_gradient_ok': None, 'fz_inversion_ok': None} + + if pz_val is not None: + pz_ok = 0 < pz_val < oz_val + pz_str = 'OK (positive, < Oz)' if pz_ok else 'FLAG: gradient broken' + print(f"[BM12 {eye_name}] Pz @ {oz_lat_s*1000:.1f} ms: {pz_val:+.2f} µV " + f"(Oz = {oz_val:+.2f} µV) [{pz_str}, {ref_note}]") + entry['pz_amp_uv'] = _f(pz_val) + entry['pz_gradient_ok'] = bool(pz_ok) + + fz_ok = fz_val < 0 + fz_str = ('OK (inverted ⇒ V1 generator confirmed)' + if fz_ok else 'FLAG: same-sign as Oz') + print(f"[BM12 {eye_name}] Fz @ {oz_lat_s*1000:.1f} ms: {fz_val:+.2f} µV " + f"(Oz = {oz_val:+.2f} µV) [{fz_str}, {ref_note}]") + entry['fz_amp_uv'] = _f(fz_val) + entry['fz_inversion_ok'] = bool(fz_ok) + + topology_results[eye_name] = entry + +results['topology'] = topology_results + +if topology_results: + fig_bm12, ax_bm12 = 
plt.subplots(figsize=(8, 5)) + eyes_t = list(topology_results.keys()) + x_t = np.arange(len(eyes_t)) + w_t = 0.22 + ch_order = [('oz_amp_uv', 'Oz', 'black'), + ('pz_amp_uv', 'Pz', '#2ca02c'), + ('fz_amp_uv', 'Fz', '#ff7f0e')] + for i, (key, label, color) in enumerate(ch_order): + vals = [topology_results.get(eye, {}).get(key) or 0 for eye in eyes_t] + ax_bm12.bar(x_t + (i - 1) * w_t, vals, w_t, + color=color, alpha=0.75, label=label) + ax_bm12.axhline(0, color='black', lw=0.8) + ax_bm12.set_xticks(x_t) + ax_bm12.set_xticklabels(eyes_t) + ax_bm12.set_ylabel('Amplitude at Oz-P100 latency (µV)') + ax_bm12.set_title(f'[{ref_label}] BM12: Topology QC (M2 referenced)\n' + 'Oz > 0, Pz > 0 (gradient), Fz < 0 (Halliday inversion)') + ax_bm12.legend() + ax_bm12.grid(axis='y', alpha=0.3) + fig_bm12.tight_layout() + plt.show() + +# ========================================================================= +# SUMMARY +# ========================================================================= +print("\n" + "="*70) +print("BIOMARKER SUMMARY") +print("="*70) + +_summary = [] + +def _row(bm_id, name, flag, value_str): + _summary.append({'id': bm_id, 'name': name, 'flag': flag, 'value': value_str}) + +def _fmt_delta(d, lat_key='d_lat', log2_key='d_log2'): + if not d or d.get(lat_key) is None: + return 'N/A' + log2_v = d.get(log2_key) + return f'Δlat={d[lat_key]:+.2f} ms, Δlog₂=' + (f'{log2_v:+.2f}' if log2_v is not None else 'N/A') + +iold = results.get('iold') or {} +_row('BM1', 'IOLD pooled (Oz)', + iold.get('flag'), + f'{iold["iold_ms"]:+.1f} ms' if iold.get('iold_ms') is not None else 'N/A') + +for size, d_sz in (results.get('iold_per_size') or {}).items(): + _row(f'BM2/{size}', f'IOLD {size} checks (Oz)', + d_sz['flag'] if d_sz else None, + f'{d_sz["iold_ms"]:+.1f} ms' if d_sz and d_sz.get('iold_ms') is not None else 'N/A') + +_s = results.get('slope') or {} +_row('BM3', 'Check-size slope diff (Oz)', + None, + f'{_s["slope_diff"]:+.4f} ms/arcmin' if _s.get('slope_diff') is not 
None else 'N/A') + +_a = results.get('amplitude') or {} +_row('BM4', 'Amplitude ratio L/R (Oz)', + _a.get('flag'), + (f'{_a["ratio"]:.2f} (log₂={_a["log2_ratio"]:+.2f})' + if _a.get('ratio') is not None else 'N/A')) + +_b = results.get('bootstrap') or {} +_row('BM5', 'Bootstrap IOLD 95% CI', + (not _b.get('iold_excludes_zero')) if _b else None, + (f'[{_b["iold_ci_lo_ms"]:+.1f}, {_b["iold_ci_hi_ms"]:+.1f}] ms' + if _b and _b.get('iold_ci_lo_ms') is not None else 'N/A')) + +_w = results.get('wpeak') or {} +any_wpeak = any(v.get('flagged') for v in _w.values()) +_row('BM6', 'W-peak (bifurcated P100)', + any_wpeak if _w else None, + 'Flagged in: ' + ', '.join(e for e, v in _w.items() if v.get('flagged')) if any_wpeak else 'None detected') + +for eye_name, h7 in (results.get('hemi_o1o2') or {}).items(): + _row(f'BM7/{eye_name[:1]}', f'Hemi asym O1/O2 — {eye_name}', + h7.get('lat_flag') or h7.get('amp_flag'), + (f'lat diff={h7["lat_diff_ms"]:+.1f} ms, amp ratio={h7["amp_ratio"]:.2f}' + if h7 else 'N/A')) + +_row('BM8', 'Δ-asymmetry O1/O2 (eye-dependent?)', + None, _fmt_delta(results.get('hemi_delta_o1o2') or {})) + +for eye_name, h_lat in results.get('lateral_p7p8', {}).items(): + vals_9 = [f'{ch}: {v["propagation_ms"]:+.0f}ms' if v and v.get('propagation_ms') is not None else f'{ch}: N/A' + for ch, v in h_lat.items()] + any_oor = any(v and v.get('propagation_in_range') is False for v in h_lat.values()) + _row(f'BM9/{eye_name[:1]}', f'Lateral P7/P8 propagation — {eye_name}', + any_oor if h_lat else None, + ' '.join(vals_9)) + +_row('BM10', 'Δ-asymmetry P7/P8 (eye-dependent?)', + None, _fmt_delta(results.get('hemi_delta_p7p8') or {})) + +_row('BM11', 'Composite hemi Δ', + None, _fmt_delta(results.get('composite_delta') or {})) + +_topo = results.get('topology') or {} +topo_flags = [e for e, entry in _topo.items() + if entry and (entry.get('pz_gradient_ok') is False + or entry.get('fz_inversion_ok') is False)] +_row('BM12', 'Topology QC (Pz gradient + Fz inversion)', + 
bool(topo_flags) if _topo else None, + 'FLAG: ' + ', '.join(topo_flags) if topo_flags else ('OK' if _topo else 'Skipped (not LM ref)')) + +print(f"\n {'BM':<10} {'Name':<48} {'Status':<12} Value") +print(" " + "-"*90) +for row in _summary: + flag = row['flag'] + status_str = '⚑ FLAGGED' if flag is True else ('N/A' if flag is None else 'OK') + print(f" {row['id']:<10} {row['name']:<48} {status_str:<12} {row['value']}") + +############################################################################### +# ## Summary dashboard figure +# + +n_rows = len(_summary) +fig_sum, ax_sum = plt.subplots(figsize=(13, max(5, n_rows * 0.42 + 1.0))) + +bar_colors = ['#e74c3c' if r['flag'] is True + else ('#95a5a6' if r['flag'] is None + else '#27ae60') + for r in _summary] + +ax_sum.barh(range(n_rows), [1] * n_rows, color=bar_colors, alpha=0.85, height=0.7) + +for i, row in enumerate(reversed(_summary)): + y = i + flag = row['flag'] + status_str = '⚑ FLAGGED' if flag is True else ('N/A' if flag is None else 'OK') + # biomarker label on the left + ax_sum.text(-0.02, y, f"{row['id']}: {row['name']}", + ha='right', va='center', fontsize=9) + # status text inside bar + ax_sum.text(0.5, y, status_str, + ha='center', va='center', fontsize=9, + fontweight='bold', color='white') + # value on the right + ax_sum.text(1.02, y, row['value'], + ha='left', va='center', fontsize=8, color='#444444') + +ax_sum.set_xlim(-4.5, 4.5) +ax_sum.set_ylim(-0.5, n_rows - 0.5) +ax_sum.axis('off') +ax_sum.set_title(f'[{ref_label}] Biomarker Summary Dashboard\n' + 'Green = OK Red = Flagged Grey = N/A or not computed', + fontsize=11, pad=12) +fig_sum.tight_layout() +plt.show() + +############################################################################### +# ## Persist biomarkers to disk +# +# Read-merge-write into ``biomarkers.json``: this run's REF_SCHEME slot is +# updated; other ref schemes already persisted from prior runs are preserved. +# Run the notebook once per REF_SCHEME to populate both keys. 
+# + +biomarker_path = recording_dir / 'biomarkers.json' +if biomarker_path.exists(): + with open(biomarker_path, 'r', encoding='utf-8') as f: + biomarker_payload = json.load(f) + biomarker_payload.setdefault('references', {}) +else: + biomarker_payload = {'references': {}} + +biomarker_payload.update({ + 'subject_id': SUBJECT_ID, + 'session_nb': SESSION_NB, + 'device_name': DEVICE_NAME, + 'experiment': EXPERIMENT, + 'site': SITE, + 'display': DISPLAY, + 'montage': MONTAGE, + 'analysis_timestamp': datetime.datetime.now().isoformat(timespec='seconds'), + 'link_panel_lag_ms': link_panel_lag * 1000.0, + 'link_panel_lag_err_ms': link_panel_lag_err * 1000.0, + 'pc_lag_ms_mean': float(pc_lag_s.mean() * 1000.0), + 'pc_lag_ms_std': float(pc_lag_s.std() * 1000.0), + 'n_recordings': len(recording_files), +}) +biomarker_payload['references'][ref_label] = results + +with open(biomarker_path, 'w', encoding='utf-8') as f: + json.dump(biomarker_payload, f, indent=2, ensure_ascii=False) +print(f"\n[persist] biomarkers written to: {biomarker_path}") +print(f"[persist] reference schemes now persisted: {list(biomarker_payload['references'])}") diff --git a/examples/visual_vep/02r__pattern_reversal_longitudinal.py b/examples/visual_vep/02r__pattern_reversal_longitudinal.py new file mode 100644 index 000000000..811a3d594 --- /dev/null +++ b/examples/visual_vep/02r__pattern_reversal_longitudinal.py @@ -0,0 +1,337 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:sphinx +# text_representation: +# extension: .py +# format_name: sphinx +# format_version: '1.1' +# jupytext_version: 1.19.1 +# kernelspec: +# display_name: eeg-experiments +# language: python +# name: python3 +# --- + +""" + +# Pattern Reversal VEP: Longitudinal Analysis + +Aggregates per-session biomarker JSON files written by +``01r__pattern_reversal_viz.py`` into a single per-subject longitudinal +trend view. 
Each prior session must have been analysed by 01r at least +once (so its ``biomarkers.json`` exists in the recording directory); +this script does **not** recompute biomarkers from raw EEG — it reads +the persisted JSON and plots trends across sessions. + +Why this split: the per-session analysis is expensive (load EEG, +filter, epoch, bootstrap), but the trend view is cheap (read JSON, +plot). Persisting biomarkers per session means new longitudinal points +are added in seconds rather than minutes, and individual sessions can +be re-analysed without invalidating the rest of the series. + +Outputs: + +- A summary DataFrame indexed by session, with one row per (session, + reference scheme) pair. +- A trend figure showing IOLD, per-eye P100 latency, check-size slope + difference, and the O1/O2 and P7/P8 Δ-asymmetry contrasts as a + function of session. +- Bootstrap CI bands around the IOLD trend so a real shift is visually + separable from session noise. + +""" + +############################################################################### +# ## Setup +# +# + +import json +import warnings +warnings.filterwarnings('ignore') + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +plt.ion() + +from eegnb import get_recording_dir + +# sphinx_gallery_thumbnail_number = 1 + +############################################################################### +# ## Configure subject / experiment selectors +# +# Change these placeholders to point at the subject + experiment + site whose +# sessions you want to plot. The script will glob every session directory it +# finds under that path and load each session's ``biomarkers.json``. 
+# + +# --- CHANGE THESE PLACEHOLDERS TO POINT AT YOUR OWN SUBJECT --------------- +SUBJECT_ID = 0 # placeholder — your subject number +DEVICE_NAME = 'cyton' +EXPERIMENT = 'visual-PRVEP' +DISPLAY = 'quest-2_120Hz' # display tag in the site path +MONTAGE = 'cap' # 'cap' or 'mark-iv' +SITE = f'{DISPLAY}_{MONTAGE}' +# -------------------------------------------------------------------------- + +# Reference scheme to plot as the primary line (the other is overlaid lighter). +PRIMARY_REF = 'Linked Mastoid M1+M2' # alternative: 'Fz (ISCEV)' + +############################################################################### +# ## Discover session JSON files +# +# Each session's ``biomarkers.json`` lives in the per-session recording dir. +# We use SESSION_NB = 0 just to anchor the path (we then walk one level up to +# find every sibling session directory). +# + +anchor_dir = get_recording_dir(DEVICE_NAME, EXPERIMENT, SUBJECT_ID, 0, site=SITE) +subject_dir = anchor_dir.parent # .../subject{SID}/ +print(f"[scan] subject dir: {subject_dir}") + +session_dirs = sorted(p for p in subject_dir.glob('session*') if p.is_dir()) +print(f"[scan] found {len(session_dirs)} session directories") + +session_jsons = [] +for sd in session_dirs: + bj = sd / 'biomarkers.json' + if bj.exists(): + session_jsons.append(bj) + else: + print(f"[skip] {sd.name}: no biomarkers.json (run 01r on it first)") + +if not session_jsons: + raise RuntimeError( + f"No biomarkers.json files found under {subject_dir}. " + "Run 01r__pattern_reversal_viz.py against each session first to " + "generate the per-session biomarker payloads." + ) + +print(f"[scan] {len(session_jsons)} session(s) have biomarkers.json") + +############################################################################### +# ## Load and flatten into a DataFrame +# +# One row per (session, reference scheme), with biomarker fields flattened +# into named columns. Fields that aren't present in a session land as NaN. 
+# + + +def flatten_session(payload): + """One dict per ref scheme in a session payload, with metadata + biomarkers.""" + rows = [] + meta = { + 'subject_id': payload.get('subject_id'), + 'session_nb': payload.get('session_nb'), + 'analysis_timestamp': payload.get('analysis_timestamp'), + 'site': payload.get('site'), + 'montage': payload.get('montage'), + 'n_trials_kept_overall': payload.get('n_trials_kept'), + 'pc_lag_ms_mean': payload.get('pc_lag_ms_mean'), + } + for ref_label, ref_block in (payload.get('references') or {}).items(): + if ref_block is None: + continue + row = dict(meta) + row['ref'] = ref_label + row['n_trials_kept'] = ref_block.get('n_trials_kept') + + iold = ref_block.get('iold') or {} + row['p100_left_ms'] = iold.get('p100_left_ms') + row['p100_right_ms'] = iold.get('p100_right_ms') + row['p100_left_uv'] = iold.get('p100_left_uv') + row['p100_right_uv'] = iold.get('p100_right_uv') + row['iold_ms'] = iold.get('iold_ms') + row['iold_flag'] = iold.get('flag') + + slope = ref_block.get('slope') or {} + row['slope_left'] = slope.get('slope_left_ms_per_arcmin') + row['slope_right'] = slope.get('slope_right_ms_per_arcmin') + row['slope_diff'] = slope.get('slope_diff') + + amp = ref_block.get('amplitude') or {} + row['amp_ratio'] = amp.get('ratio') + row['amp_log2'] = amp.get('log2_ratio') + + boot = ref_block.get('bootstrap') or {} + row['boot_iold_median'] = boot.get('iold_median_ms') + row['boot_iold_lo'] = boot.get('iold_ci_lo_ms') + row['boot_iold_hi'] = boot.get('iold_ci_hi_ms') + row['boot_left_lo'] = boot.get('left_p100_ci_lo_ms') + row['boot_left_hi'] = boot.get('left_p100_ci_hi_ms') + row['boot_right_lo'] = boot.get('right_p100_ci_lo_ms') + row['boot_right_hi'] = boot.get('right_p100_ci_hi_ms') + + hd_o1o2 = ref_block.get('hemi_delta_o1o2') or {} + row['delta_lat_o1o2'] = hd_o1o2.get('d_lat') + row['delta_log2_o1o2'] = hd_o1o2.get('d_log2') + + hd_p7p8 = ref_block.get('hemi_delta_p7p8') or {} + row['delta_lat_p7p8'] = hd_p7p8.get('d_lat') + 
row['delta_log2_p7p8'] = hd_p7p8.get('d_log2') + + comp_d = ref_block.get('composite_delta') or {} + row['composite_d_lat'] = comp_d.get('d_lat') + row['composite_d_log2'] = comp_d.get('d_log2') + + rows.append(row) + return rows + + +all_rows = [] +for jp in session_jsons: + with open(jp, 'r', encoding='utf-8') as f: + payload = json.load(f) + all_rows.extend(flatten_session(payload)) + +df = pd.DataFrame(all_rows).sort_values(['session_nb', 'ref']).reset_index(drop=True) +print(f"\n[load] longitudinal dataframe: {len(df)} rows, {len(df['session_nb'].unique())} sessions, " + f"{len(df['ref'].unique())} reference scheme(s)") +print(df[['session_nb', 'ref', 'n_trials_kept', 'iold_ms', 'slope_diff', + 'delta_lat_o1o2', 'delta_lat_p7p8']].to_string(index=False)) + +############################################################################### +# ## Trend figure +# +# Six panels: +# 1. IOLD over sessions, with bootstrap 95% CI band on the primary ref. +# ±8 ms clinical threshold drawn as horizontal guides. +# 2. Per-eye P100 latency, with bootstrap CI bands. +# 3. Check-size slope difference (L − R, ms / arcmin). +# 4. O1/O2 inter-ocular Δ-asymmetry (latency). +# 5. P7/P8 inter-ocular Δ-asymmetry (latency). +# 6. Inter-ocular amplitude ratio (log2). +# + +fig, axes = plt.subplots(2, 3, figsize=(18, 10)) +ax_iold, ax_p100, ax_slope, ax_d_o1o2, ax_d_p7p8, ax_amp = axes.flatten() + +ref_styles = { + PRIMARY_REF: dict(color='#2c3e50', linewidth=2.0, alpha=1.0, marker='o'), +} +# Lighter style for the secondary reference scheme. +for ref in df['ref'].unique(): + if ref != PRIMARY_REF: + ref_styles[ref] = dict(color='#95a5a6', linewidth=1.2, alpha=0.6, marker='s', linestyle='--') + +# --- 1. 
IOLD over time ----------------------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + ax_iold.plot(sub['session_nb'], sub['iold_ms'], label=f'{ref} (point)', **style) + if ref == PRIMARY_REF and sub['boot_iold_lo'].notna().any(): + ax_iold.fill_between(sub['session_nb'], sub['boot_iold_lo'], sub['boot_iold_hi'], + color=style['color'], alpha=0.15, label=f'{ref} (95% CI)') +ax_iold.axhline(8.0, color='#c0392b', linestyle=':', alpha=0.6, label='±8 ms clinical threshold') +ax_iold.axhline(-8.0, color='#c0392b', linestyle=':', alpha=0.6) +ax_iold.axhline(0, color='black', linestyle='-', alpha=0.3) +ax_iold.set_title('IOLD (L − R P100 latency) over sessions') +ax_iold.set_xlabel('Session number') +ax_iold.set_ylabel('IOLD (ms)') +ax_iold.grid(True, alpha=0.3) +ax_iold.legend(loc='best', fontsize=8) + +# --- 2. Per-eye P100 latency ---------------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + is_primary = (ref == PRIMARY_REF) + ax_p100.plot(sub['session_nb'], sub['p100_left_ms'], color='#2980b9', + marker='o' if is_primary else 's', + linestyle='-' if is_primary else '--', + alpha=style['alpha'], linewidth=style['linewidth'], + label=f'Left eye [{ref}]') + ax_p100.plot(sub['session_nb'], sub['p100_right_ms'], color='#c0392b', + marker='o' if is_primary else 's', + linestyle='-' if is_primary else '--', + alpha=style['alpha'], linewidth=style['linewidth'], + label=f'Right eye [{ref}]') + if is_primary and sub['boot_left_lo'].notna().any(): + ax_p100.fill_between(sub['session_nb'], sub['boot_left_lo'], sub['boot_left_hi'], + color='#2980b9', alpha=0.15) + ax_p100.fill_between(sub['session_nb'], sub['boot_right_lo'], sub['boot_right_hi'], + color='#c0392b', alpha=0.15) +ax_p100.set_title('Per-eye P100 latency over sessions') +ax_p100.set_xlabel('Session number') +ax_p100.set_ylabel('P100 latency (ms)') +ax_p100.grid(True, alpha=0.3) 
+ax_p100.legend(loc='best', fontsize=7) + +# --- 3. Check-size slope difference --------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + ax_slope.plot(sub['session_nb'], sub['slope_diff'], label=ref, **style) +ax_slope.axhline(0, color='black', linestyle='-', alpha=0.3) +ax_slope.set_title('Inter-ocular check-size slope difference (L − R)') +ax_slope.set_xlabel('Session number') +ax_slope.set_ylabel('Δ slope (ms / arcmin)') +ax_slope.grid(True, alpha=0.3) +ax_slope.legend(loc='best', fontsize=8) + +# --- 4. O1/O2 Δ-asymmetry -------------------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + ax_d_o1o2.plot(sub['session_nb'], sub['delta_lat_o1o2'], label=ref, **style) +ax_d_o1o2.axhline(0, color='black', linestyle='-', alpha=0.3) +ax_d_o1o2.set_title('O1/O2 inter-ocular Δ-asymmetry (latency)') +ax_d_o1o2.set_xlabel('Session number') +ax_d_o1o2.set_ylabel('Δlat (ms) — eye-dependent skew') +ax_d_o1o2.grid(True, alpha=0.3) +ax_d_o1o2.legend(loc='best', fontsize=8) + +# --- 5. P7/P8 Δ-asymmetry -------------------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + ax_d_p7p8.plot(sub['session_nb'], sub['delta_lat_p7p8'], label=ref, **style) +ax_d_p7p8.axhline(0, color='black', linestyle='-', alpha=0.3) +ax_d_p7p8.set_title('P7/P8 inter-ocular Δ-asymmetry (latency)') +ax_d_p7p8.set_xlabel('Session number') +ax_d_p7p8.set_ylabel('Δlat (ms) — eye-dependent skew') +ax_d_p7p8.grid(True, alpha=0.3) +ax_d_p7p8.legend(loc='best', fontsize=8) + +# --- 6. 
Amplitude ratio (log2) -------------------------------------------- +for ref, sub in df.groupby('ref'): + style = ref_styles.get(ref, dict(color='gray')) + ax_amp.plot(sub['session_nb'], sub['amp_log2'], label=ref, **style) +ax_amp.axhline(1.0, color='#c0392b', linestyle=':', alpha=0.6, label='±1 log2 threshold') +ax_amp.axhline(-1.0, color='#c0392b', linestyle=':', alpha=0.6) +ax_amp.axhline(0, color='black', linestyle='-', alpha=0.3) +ax_amp.set_title('Inter-ocular amplitude ratio over sessions') +ax_amp.set_xlabel('Session number') +ax_amp.set_ylabel('log2(L / R)') +ax_amp.grid(True, alpha=0.3) +ax_amp.legend(loc='best', fontsize=8) + +fig.suptitle(f'PR-VEP longitudinal trends — subject {SUBJECT_ID:04d}, {SITE}', + fontsize=14, fontweight='bold') +fig.tight_layout() +plt.show() + +############################################################################### +# ## Baseline summary +# +# Treat the first 3–5 sessions (or all sessions before a known intervention) +# as the baseline, and report the mean ± std of each biomarker. This bracket +# is what subsequent sessions need to fall outside of to count as a real shift. +# + +BASELINE_LAST_SESSION_NB = None # set to e.g. 4 to mark sessions <= 4 as baseline + +if BASELINE_LAST_SESSION_NB is not None: + baseline_mask = df['session_nb'] <= BASELINE_LAST_SESSION_NB +else: + baseline_mask = slice(None) + print("[baseline] BASELINE_LAST_SESSION_NB not set — using ALL sessions as baseline. 
" + "Edit the constant above to define a baseline window.") + +baseline_df = df.loc[baseline_mask] if BASELINE_LAST_SESSION_NB is not None else df +metrics = ['iold_ms', 'slope_diff', 'amp_log2', + 'delta_lat_o1o2', 'delta_lat_p7p8', + 'p100_left_ms', 'p100_right_ms'] + +primary = baseline_df[baseline_df['ref'] == PRIMARY_REF] +if len(primary) > 0: + print(f"\n[baseline] reference = {PRIMARY_REF}, n_sessions = {len(primary)}") + print(primary[metrics].agg(['mean', 'std']).T.to_string()) diff --git a/examples/visual_vep/README.txt b/examples/visual_vep/README.txt new file mode 100644 index 000000000..e201880e7 --- /dev/null +++ b/examples/visual_vep/README.txt @@ -0,0 +1 @@ +Visual VEP diff --git a/pyproject.toml b/pyproject.toml index de7bf0dc5..e9fb602e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,6 +9,7 @@ addopts = """ --current-env --ignore-glob 'examples/**.py' --ignore-glob '**/baseline_task.py' + --ignore 'tests/test_run_experiments.py' """ testpaths = [ "eegnb", diff --git a/requirements.txt b/requirements.txt index 001b69229..c5cf6db4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,8 +3,7 @@ scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" -numpy<=1.24.4; python_version == "3.8" +numpy>=1.26.0 mne>=0.20.8 seaborn>=0.11.0 pyriemann>=0.2.7 @@ -15,10 +14,7 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. 
-pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' airium>=0.1.0 attrdict>=2.0.1 attrdict3 @@ -26,6 +22,7 @@ attrdict3 ## ~~ Streaming Requirements ~~ +pyxid2 muselsl>=2.0.2 # Upgrade from 1.10.5 to 1.16.2 so the arm64 lib is available to macOS Apple Silicon for preventing error: # pylsl/liblsl64.dylib' (mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64e' or 'arm64')) @@ -35,10 +32,7 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. -pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' #Removed keyboard dependency due segmentation fault on Apple Silicon: https://github.com/boppreh/keyboard/issues/507 pynput airium>=0.1.0 @@ -46,50 +40,55 @@ attrdict>=2.0.1 attrdict3 click +# Tests +pytest +pytest-cov +nbval + ## ~~ Stimpres Requirements ~~ -#pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to -# a specific version prevents an endless dependency resolution loop. -pyobjc==7.3; sys_platform == 'darwin' +pyobjc>=8.0; sys_platform == 'darwin' #upgrade psychopy to use newer wxpython dependency which is prebuilt for m1 support. -psychopy==2025.1.1 -#needed for macos arm sound support: https://github.com/psychopy/psychopy-sounddevice/pull/4 --e psychopy-sounddevice +# Forked with a fix for Rift stereo projection matrix crash under strict-ndim psychxr. +psychopy @ git+https://github.com/pellet/psychopy.git@v2026.2.0-rift-fix +#needed for macos arm sound support +psychopy-sounddevice @ git+https://github.com/psychopy/psychopy-sounddevice.git ffpyplayer==4.5.2 # 4.5.3 fails to build as wheel. 
psychtoolbox scikit-learn>=0.23.2 pandas>=1.1.4 -numpy>=1.26.0; python_version >= "3.9" -numpy==1.24.4; python_version == "3.8" +numpy>=1.26.0 mne>=0.20.8 seaborn>=0.11.0 pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" airium>=0.1.0 attrdict>=2.0.1 attrdict3 -# pywinhook needs some special treatment since there are only wheels on PyPI for Python 3.7-3.8, and building requires special tools (swig, VS C++ tools) -# See issue: https://github.com/NeuroTechX/eeg-notebooks/issues/29 -pywinhook>=1.6.0 ; platform_system == "Windows" and (python_version == "3.7" or python_version == "3.8") -pywinhook @ https://github.com/ActivityWatch/wheels/raw/master/pywinhook/pyWinhook-1.6.2-cp39-cp39-win_amd64.whl ; platform_system == "Windows" and python_version == "3.9" - # pyglet downgrade to prevent threadmode warning on windows # See issue: https://github.com/psychopy/psychopy/issues/2876 pyglet==1.4.11 ; platform_system == "Windows" -# Oculus/Quest VR support - currently only supported on Windows and -# <= 3.9, otherwise will need Oculus PC SDK to build wheel. -psychxr>=0.2.4rc2; platform_system == "Windows" and python_version <= "3.9" - +# Quest-link VR support - prebuilt Windows wheels from the fork's GitHub +# release (0.2.6rc1 adds Python 3.10 support). +# psychxr/quest-link is Windows-only. +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp310-cp310-win_amd64.whl ; platform_system == "Windows" and python_version == "3.10" +# PsychoPy/PsychXR does not yet officially support Python > 3.10; the wheels below are experimental. 
+psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp311-cp311-win_amd64.whl ; platform_system == "Windows" and python_version == "3.11" +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp312-cp312-win_amd64.whl ; platform_system == "Windows" and python_version == "3.12" +psychxr @ https://github.com/pellet/psychxr/releases/download/v0.2.6rc1/psychxr-0.2.6rc1-cp313-cp313-win_amd64.whl ; platform_system == "Windows" and python_version == "3.13" +# Used for generating checkerboard in pattern reversal experiment +stimupy -## ~~ Docsbuild Requirements ~~ +## ~~ Docsbuild Requirements ~~ +setuptools<81 # brainflow imports pkg_resources at runtime; setuptools 81 deprecated it and 82 removed it +python-dotenv recommonmark brainflow numpydoc diff --git a/tests/test_acquisition.py b/tests/test_acquisition.py new file mode 100644 index 000000000..9eaec5ff3 --- /dev/null +++ b/tests/test_acquisition.py @@ -0,0 +1,56 @@ +import os +import time +import pandas as pd +import pytest +from eegnb.devices.eeg import EEG + +def test_synthetic_acquisition(tmp_path): + """ + Test the data acquisition pipeline using a synthetic BrainFlow board. + This verifies that we can initialize a device, start a stream, + record data, and save it to a CSV file in a CI-friendly way. 
+    """
+    # Use a temporary file for recording
+    save_fn = tmp_path / "synthetic_data.csv"
+
+    # Initialize EEG with synthetic board
+    # BrainFlow synthetic board (ID -1) works without hardware
+    eeg = EEG(device='synthetic')
+
+    # Verify metadata initialization
+    assert eeg.backend == 'brainflow'
+    assert eeg.sfreq == 250 # Default for synthetic board
+    assert len(eeg.channels) > 0
+
+    # Start stream and capture data
+    # We specify a short duration for the test
+    record_duration = 2
+    eeg.start(str(save_fn), duration=record_duration + 5)
+
+    # Simulate some experiment time
+    time.sleep(record_duration)
+
+    # Push a few synthetic markers
+    eeg.push_sample(marker=1)
+    time.sleep(0.1)
+    eeg.push_sample(marker=2)
+
+    # Stop recording and release session
+    eeg.stop()
+
+    # Verify file creation and content
+    assert save_fn.exists()
+
+    # Read the data back
+    data = pd.read_csv(save_fn)
+
+    # Basic data validation
+    assert len(data) > 0
+    assert 'timestamps' in data.columns
+    assert 'stim' in data.columns
+
+    # Check if markers were recorded (may vary slightly based on timing)
+    # but we should at least see non-zero values in the stim column
+    assert (data['stim'] != 0).any()
+
+    print(f"Acquired {len(data)} samples with columns: {list(data.columns)}")