From 114f9ebafd602cf16e2d27e793e3c1cd35a7f270 Mon Sep 17 00:00:00 2001
From: Amit Moryossef
Date: Tue, 28 Apr 2026 09:21:33 +0000
Subject: [PATCH 1/2] Add Susman & Kimmelman 2024 on CNN+rule-based eye blink detection in LSF

Cites the SignLang 2024 paper in the Automating Annotation section.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/index.md       |  1 +
 src/references.bib | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/src/index.md b/src/index.md
index 4c6163d8..745e55f2 100644
--- a/src/index.md
+++ b/src/index.md
@@ -1175,6 +1175,7 @@ Therefore, data collection often requires significant efforts and costs of on-si
 ###### Automating Annotation {-}
 One helpful research direction for collecting more data that enables the development of deployable SLP models is creating tools that can simplify or automate parts of the collection and annotation process.
 One of the most significant bottlenecks in obtaining more adequate signed language data is the time and scarcity of experts required to perform annotation.
 Therefore, tools that perform automatic parsing, detection of frame boundaries, extraction of articulatory features, suggestions for lexical annotations, and allow parts of the annotation process to be crowdsourced to non-experts, to name a few, have a high potential to facilitate and accelerate the availability of good data.
+Targeting prosodic non-manual annotation specifically, @susman-kimmelman-2024-eye trained a CNN classifier of eye openness (open, in-between, closed) on French Sign Language data and combined it with rule-based temporal aggregation to detect linguistically defined eye blinks, outperforming an EAR-based baseline using MediaPipe landmarks.
 ### Practice
 Deaf Collaboration
diff --git a/src/references.bib b/src/references.bib
index 2449d8f4..f712af18 100644
--- a/src/references.bib
+++ b/src/references.bib
@@ -4496,3 +4496,22 @@ @inproceedings{rathmann-etal-2024-visuolab
   url = {https://aclanthology.org/2024.signlang-1.32},
   year = {2024}
 }
+
+@inproceedings{susman-kimmelman-2024-eye,
+  title = "Eye Blink Detection in Sign Language Data Using {CNN}s and Rule-Based Methods",
+  author = "Susman, Margaux and
+    Kimmelman, Vadim",
+  editor = "Efthimiou, Eleni and
+    Fotinea, Stavroula-Evita and
+    Hanke, Thomas and
+    Hochgesang, Julie A. and
+    Mesch, Johanna and
+    Schulder, Marc",
+  booktitle = "Proceedings of the LREC-COLING 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources",
+  month = may,
+  year = "2024",
+  address = "Torino, Italia",
+  publisher = "ELRA and ICCL",
+  url = "https://aclanthology.org/2024.signlang-1.40/",
+  pages = "361--369"
+}

From 625db9af328df45b21a816bf24e3c61d71a129a8 Mon Sep 17 00:00:00 2001
From: AmitMY
Date: Tue, 28 Apr 2026 11:46:25 +0000
Subject: [PATCH 2/2] Define EAR (Eye Aspect Ratio) abbreviation (review feedback)
---
 src/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/index.md b/src/index.md
index 745e55f2..cc117882 100644
--- a/src/index.md
+++ b/src/index.md
@@ -1175,7 +1175,7 @@ Therefore, data collection often requires significant efforts and costs of on-si
 ###### Automating Annotation {-}
 One helpful research direction for collecting more data that enables the development of deployable SLP models is creating tools that can simplify or automate parts of the collection and annotation process.
 One of the most significant bottlenecks in obtaining more adequate signed language data is the time and scarcity of experts required to perform annotation.
 Therefore, tools that perform automatic parsing, detection of frame boundaries, extraction of articulatory features, suggestions for lexical annotations, and allow parts of the annotation process to be crowdsourced to non-experts, to name a few, have a high potential to facilitate and accelerate the availability of good data.
-Targeting prosodic non-manual annotation specifically, @susman-kimmelman-2024-eye trained a CNN classifier of eye openness (open, in-between, closed) on French Sign Language data and combined it with rule-based temporal aggregation to detect linguistically defined eye blinks, outperforming an EAR-based baseline using MediaPipe landmarks.
+Targeting prosodic non-manual annotation specifically, @susman-kimmelman-2024-eye trained a CNN classifier of eye openness (open, in-between, closed) on French Sign Language data and combined it with rule-based temporal aggregation to detect linguistically defined eye blinks, outperforming an Eye Aspect Ratio (EAR) baseline computed from MediaPipe landmarks.
 ### Practice
 Deaf Collaboration
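
As context for the baseline named in the added sentence, the sketch below illustrates the general Eye Aspect Ratio (EAR) idea: compute a ratio of vertical to horizontal eye-landmark distances per frame and flag sustained low values as blinks. This is a minimal illustration under stated assumptions, not the implementation evaluated by @susman-kimmelman-2024-eye: the MediaPipe Face Mesh landmark indices, the 0.2 closed-eye threshold, and the two-frame minimum duration are placeholder choices for the example.

```python
import numpy as np

# Face Mesh indices commonly used for the six EAR points of the right eye
# (outer corner, two upper-lid points, inner corner, two lower-lid points).
# Indices, threshold, and duration rule are illustrative assumptions,
# not values taken from the paper.
RIGHT_EYE = [33, 160, 158, 133, 153, 144]


def eye_aspect_ratio(landmarks: np.ndarray, idx=RIGHT_EYE) -> float:
    """EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|) over rows of (x, y) coordinates."""
    p1, p2, p3, p4, p5, p6 = landmarks[idx]
    vertical = np.linalg.norm(p2 - p6) + np.linalg.norm(p3 - p5)
    horizontal = np.linalg.norm(p1 - p4)
    return float(vertical / (2.0 * horizontal))


def detect_blinks(ear_per_frame, closed_threshold=0.2, min_frames=2):
    """Report (start, end) frame spans where EAR stays below the threshold."""
    blinks, start = [], None
    for t, ear in enumerate(ear_per_frame):
        if ear < closed_threshold:
            if start is None:
                start = t
        else:
            if start is not None and t - start >= min_frames:
                blinks.append((start, t - 1))
            start = None
    if start is not None and len(ear_per_frame) - start >= min_frames:
        blinks.append((start, len(ear_per_frame) - 1))
    return blinks


# Example: one EAR value per video frame.
# detect_blinks([0.31, 0.30, 0.12, 0.10, 0.11, 0.29]) -> [(2, 4)]
```

The paper's CNN-plus-rules pipeline replaces the EAR threshold with per-frame openness classification; a similar run-length rule to `detect_blinks` could then be applied over frames predicted as closed.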