Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions research-rebuttal-response-pack/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Research Rebuttal Response Pack

Deterministic author rebuttal and revision planning for SCIBASE AI research assistants.

The module ingests a manuscript review packet and produces:

- a reviewer response matrix,
- concern clusters,
- evidence requests,
- revision tasks,
- unresolved-risk flags,
- resubmission readiness scoring,
- an editor-facing summary.

It is dependency-free and uses synthetic local data for the demo.

## Run

```bash
npm run check
npm test
npm run demo
```
66 changes: 66 additions & 0 deletions research-rebuttal-response-pack/data/sample-review-packet.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
{
"manuscript": {
"id": "ms-graph-retrieval-2026",
"title": "Graph Retrieval Signals Improve Literature Discovery",
"decision": "major-revision",
"claims": [
{
"id": "c1",
"text": "The graph retrieval ranker improves recall for interdisciplinary literature discovery.",
"evidenceIds": ["ev-benchmark", "ev-ablation"]
},
{
"id": "c2",
"text": "The method remains robust across biomedical and materials-science corpora.",
"evidenceIds": ["ev-benchmark"]
},
{
"id": "c3",
"text": "The online prototype can be reproduced from the released scripts.",
"evidenceIds": ["ev-scripts"]
}
],
"evidence": [
{
"id": "ev-benchmark",
"kind": "benchmark",
"description": "Cross-domain retrieval benchmark with 12,000 labeled query-document pairs.",
"status": "available"
},
{
"id": "ev-ablation",
"kind": "ablation",
"description": "Ablation table comparing graph, citation, and keyword signals.",
"status": "available"
},
{
"id": "ev-scripts",
"kind": "code",
"description": "Prototype reproduction scripts without pinned dependency versions.",
"status": "partial"
}
]
},
"reviews": [
{
"reviewer": "R1",
"severity": "major",
"text": "The core recall claim is interesting, but the manuscript does not show confidence intervals or a statistical test for the benchmark delta."
},
{
"reviewer": "R2",
"severity": "major",
"text": "The materials-science evaluation appears much smaller than the biomedical corpus. Please explain whether the robustness claim is overgeneralized."
},
{
"reviewer": "R2",
"severity": "minor",
"text": "The reproduction scripts do not pin package versions, so the prototype is not reliably reproducible."
},
{
"reviewer": "R3",
"severity": "major",
"text": "The ablation table is useful, but it is unclear which graph features drive the largest performance gains."
}
]
}
Binary file added research-rebuttal-response-pack/docs/demo.mp4
Binary file not shown.
22 changes: 22 additions & 0 deletions research-rebuttal-response-pack/docs/demo.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
26 changes: 26 additions & 0 deletions research-rebuttal-response-pack/docs/requirement-map.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Requirement Map

This slice targets SCIBASE issue #16 by adding a focused author rebuttal and revision response pack for the AI-powered research assistant workflow.

## Covered capabilities

- Auto peer-review reports: reviewer findings are converted into response rows with concern topics, response strategy, and editor-facing summaries.
- Reproducibility checks: partial code/dependency evidence creates high-priority evidence requests and unresolved-risk flags.
- Research gap discovery support: scope/generalization concerns create revision tasks that identify underpowered domains and missing evidence.
- Responsible AI assistant behavior: the module is deterministic, credential-free, and uses synthetic local data only.

## Distinction from existing #16 slices

- Not a broad assistant suite.
- Not a protocol trace or evidence-grounding packet.
- Not a reviewer calibration bench.
- Not a redline packet.
- Not a replication planner.

The output is specifically an author-facing rebuttal matrix plus editor-facing resubmission readiness packet.

## Verification

- `npm run check`
- `npm test`
- `npm run demo`
12 changes: 12 additions & 0 deletions research-rebuttal-response-pack/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
{
"name": "research-rebuttal-response-pack",
"version": "1.0.0",
"description": "Deterministic author rebuttal and revision response planning for SCIBASE AI research assistants.",
"type": "module",
"scripts": {
"check": "node --check src/rebuttal-response-pack.js && node --check scripts/demo.js",
"test": "node --test test/rebuttal-response-pack.test.js",
"demo": "node scripts/demo.js"
},
"license": "MIT"
}
23 changes: 23 additions & 0 deletions research-rebuttal-response-pack/scripts/demo.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import samplePacket from "../data/sample-review-packet.json" with { type: "json" };
import { buildRebuttalResponsePack } from "../src/rebuttal-response-pack.js";

// Build the full pack once from the bundled synthetic review packet.
const responsePack = buildRebuttalResponsePack(samplePacket);
const { resubmissionReadiness } = responsePack;

console.log(`Rebuttal response pack for: ${responsePack.title}`);
console.log(`Decision: ${responsePack.decision}`);
console.log(`Readiness: ${resubmissionReadiness.status} (${resubmissionReadiness.score}/100)`);

console.log("\nResponse matrix:");
responsePack.responseMatrix.forEach((row) => {
  const claimList = row.linkedClaimIds.join(", ") || "manual-link-required";
  console.log(`- ${row.id} [${row.severity}] ${row.concernTopic}`);
  console.log(`  claims: ${claimList}`);
  console.log(`  strategy: ${row.responseStrategy}`);
  console.log(`  tasks: ${row.revisionTasks.join(" | ")}`);
});

console.log("\nConcern clusters:");
responsePack.concernClusters.forEach(({ topic, responseIds, highestSeverity }) => {
  console.log(`- ${topic}: ${responseIds.join(", ")} (${highestSeverity})`);
});

console.log("\nEditor summary:");
console.log(JSON.stringify(responsePack.editorSummary, null, 2));
191 changes: 191 additions & 0 deletions research-rebuttal-response-pack/src/rebuttal-response-pack.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
/**
 * Keyword rules used to classify a reviewer concern into a topic.
 * Each rule carries lowercase substring patterns to match against the
 * review text, plus the canned response strategy and revision template
 * emitted for that topic. Frozen: consumers spread-copy rules and must
 * never mutate this shared table.
 */
const TOPIC_RULES = Object.freeze([
  Object.freeze({
    topic: "statistical support",
    patterns: Object.freeze(["confidence", "statistical", "test", "significance", "delta"]),
    responseStrategy: "Add quantitative uncertainty support before defending the claim.",
    revisionTemplate: "Add confidence intervals, test method, and benchmark delta interpretation."
  }),
  Object.freeze({
    topic: "scope and generalization",
    patterns: Object.freeze(["robust", "overgeneralized", "smaller", "corpus", "domain"]),
    responseStrategy: "Narrow the claim or add cross-domain evidence.",
    revisionTemplate: "Clarify domain coverage, sample balance, and limits of generalization."
  }),
  Object.freeze({
    topic: "reproducibility",
    patterns: Object.freeze(["reproduc", "scripts", "package", "dependency", "version"]),
    responseStrategy: "Convert reproducibility concern into a concrete artifact fix.",
    revisionTemplate: "Pin dependencies, publish run commands, and add environment checks."
  }),
  Object.freeze({
    topic: "feature attribution",
    patterns: Object.freeze(["ablation", "features", "drive", "unclear", "gains"]),
    responseStrategy: "Explain which mechanism supports the reported improvement.",
    revisionTemplate: "Expand ablation narrative with ranked feature contributions."
  })
]);

/**
 * Numeric weight per reviewer severity label, used for risk scoring and
 * severity comparisons. Frozen shared constant — never mutated at runtime.
 */
const SEVERITY_WEIGHT = Object.freeze({
  minor: 1,
  moderate: 2,
  major: 3,
  critical: 4
});

// Coerce any value to a lowercase string; falsy inputs (null, undefined,
// 0, "") normalize to the empty string.
function normalizeText(value) {
  const raw = value || "";
  return String(raw).toLowerCase();
}

/**
 * Classify a review comment against TOPIC_RULES by counting keyword hits.
 * The best-scoring rule wins (ties broken alphabetically by topic); when no
 * pattern matches at all, a generic "editorial clarity" concern is returned.
 */
function classifyConcern(reviewText) {
  const text = normalizeText(reviewText);

  const ranked = TOPIC_RULES
    .map((rule) => {
      const hits = rule.patterns.filter((pattern) => text.includes(pattern)).length;
      return { ...rule, score: hits };
    })
    .sort((a, b) => b.score - a.score || a.topic.localeCompare(b.topic));

  const best = ranked[0];
  if (best.score > 0) {
    return best;
  }

  return {
    topic: "editorial clarity",
    responseStrategy: "Acknowledge the concern and make the affected claim easier to audit.",
    revisionTemplate: "Add a targeted clarification and cite the supporting artifact."
  };
}

// Link a review comment to manuscript claims: a claim is relevant when any
// of its long tokens (alphanumeric runs longer than 5 chars) appears in the
// normalized review text.
function relevantClaims(manuscript, reviewText) {
  const reviewLower = normalizeText(reviewText);

  return manuscript.claims.filter((claim) => {
    const tokens = normalizeText(claim.text)
      .split(/[^a-z0-9]+/)
      .filter((token) => token.length > 5);
    return tokens.some((token) => reviewLower.includes(token));
  });
}

// Index the manuscript's evidence entries by id for O(1) lookups.
function evidenceById(manuscript) {
  const index = new Map();
  for (const entry of manuscript.evidence) {
    index.set(entry.id, entry);
  }
  return index;
}

/**
 * Build the evidence-request list for one reviewer concern.
 *
 * @param {object} manuscript - Manuscript with `evidence` entries.
 * @param {object[]} claims - Claims linked to the concern (may be empty).
 * @param {object} concern - Classified concern with a `topic` field.
 * @returns {object[]} Requests of shape { claimId, priority, request }.
 */
function evidenceRequests(manuscript, claims, concern) {
  const evidenceMap = evidenceById(manuscript);
  const requests = [];

  // Any linked claim backed by evidence that is not fully "available"
  // (e.g. "partial") gets a high-priority completion request.
  for (const claim of claims) {
    const incompleteEvidence = claim.evidenceIds
      .map((id) => evidenceMap.get(id))
      .filter((entry) => entry && entry.status !== "available");

    if (incompleteEvidence.length > 0) {
      requests.push({
        claimId: claim.id,
        priority: "high",
        request: `Complete ${incompleteEvidence.map((entry) => entry.kind).join(", ")} evidence for claim ${claim.id}.`
      });
    }
  }

  // Topic-specific boilerplate requests. Use ?? (not ||) so only a truly
  // absent first claim falls back to the manuscript-level placeholder.
  if (concern.topic === "statistical support") {
    requests.push({
      claimId: claims[0]?.id ?? "manuscript",
      priority: "high",
      request: "Attach uncertainty intervals and statistical test output for the benchmark result."
    });
  }

  if (concern.topic === "scope and generalization") {
    requests.push({
      claimId: claims[0]?.id ?? "manuscript",
      priority: "medium",
      request: "Add domain-size table and revise wording where evidence is underpowered."
    });
  }

  return requests;
}

/**
 * Convert one reviewer comment into a structured response-matrix row:
 * classified topic, linked claims, response draft, revision tasks,
 * evidence requests, and a numeric risk score.
 */
function buildResponseRow(manuscript, review, index) {
  const concern = classifyConcern(review.text);
  const linkedClaims = relevantClaims(manuscript, review.text);
  const severityWeight = SEVERITY_WEIGHT[review.severity] || 2;
  const requests = evidenceRequests(manuscript, linkedClaims, concern);

  // A row is an unresolved risk when the concern is at least "major" AND
  // still needs high-priority evidence.
  const needsHighPriorityEvidence = requests.some((request) => request.priority === "high");
  const unresolvedRisk = severityWeight >= 3 && needsHighPriorityEvidence;
  const riskScore = severityWeight + requests.length + (unresolvedRisk ? 2 : 0);

  return {
    id: `response-${index + 1}`,
    reviewer: review.reviewer,
    severity: review.severity,
    concernTopic: concern.topic,
    reviewerConcern: review.text,
    linkedClaimIds: linkedClaims.map((claim) => claim.id),
    responseStrategy: concern.responseStrategy,
    authorResponseDraft: `We thank ${review.reviewer} for this ${concern.topic} concern. We will revise the manuscript by: ${concern.revisionTemplate}`,
    revisionTasks: [concern.revisionTemplate, ...requests.map((request) => request.request)],
    evidenceRequests: requests,
    unresolvedRisk,
    riskScore
  };
}

// Group response rows by concern topic. Each cluster tracks its response
// ids, the worst severity seen, and the union of revision tasks.
function clusterRows(rows) {
  const byTopic = new Map();

  for (const row of rows) {
    let cluster = byTopic.get(row.concernTopic);
    if (!cluster) {
      cluster = {
        topic: row.concernTopic,
        responseIds: [],
        highestSeverity: row.severity,
        sharedTasks: new Set()
      };
      byTopic.set(row.concernTopic, cluster);
    }

    cluster.responseIds.push(row.id);
    for (const task of row.revisionTasks) {
      cluster.sharedTasks.add(task);
    }

    const rowWeight = SEVERITY_WEIGHT[row.severity] || 0;
    const clusterWeight = SEVERITY_WEIGHT[cluster.highestSeverity] || 0;
    if (rowWeight > clusterWeight) {
      cluster.highestSeverity = row.severity;
    }
  }

  // Serialize task Sets to arrays so the pack stays JSON-friendly.
  return [...byTopic.values()].map((cluster) => ({
    ...cluster,
    sharedTasks: [...cluster.sharedTasks]
  }));
}

// Score resubmission readiness: start at 100, subtract 5 per risk point and
// 10 per unresolved high-risk response, flooring at 0. Unresolved rows are
// also surfaced as human-readable blockers.
function readiness(rows) {
  let totalRisk = 0;
  const blockers = [];

  for (const row of rows) {
    totalRisk += row.riskScore;
    if (row.unresolvedRisk) {
      blockers.push(`${row.id}: ${row.concernTopic}`);
    }
  }

  const score = Math.max(0, 100 - totalRisk * 5 - blockers.length * 10);

  let status = "not-ready";
  if (score >= 80) {
    status = "ready-with-minor-edits";
  } else if (score >= 55) {
    status = "major-revision-needed";
  }

  return { score, status, blockers };
}

/**
 * Build the complete rebuttal response pack for a review packet.
 *
 * @param {object} packet - Packet with `manuscript` and `reviews` fields.
 * @returns {object} Response matrix, concern clusters, resubmission
 *   readiness, and an editor-facing summary.
 * @throws {Error} When the manuscript or review list is missing.
 */
export function buildRebuttalResponsePack(packet) {
  if (!packet?.manuscript || !Array.isArray(packet.reviews)) {
    throw new Error("A manuscript and review list are required.");
  }

  const { manuscript, reviews } = packet;
  const responseMatrix = reviews.map((review, index) => buildResponseRow(manuscript, review, index));
  const concernClusters = clusterRows(responseMatrix);
  const resubmissionReadiness = readiness(responseMatrix);

  const highRiskResponses = responseMatrix
    .filter((row) => row.unresolvedRisk)
    .map((row) => row.id);
  const recommendedNextAction = resubmissionReadiness.status === "not-ready"
    ? "Resolve high-risk evidence gaps before resubmission."
    : "Prepare tracked-change manuscript and response letter.";

  return {
    manuscriptId: manuscript.id,
    title: manuscript.title,
    decision: manuscript.decision,
    responseMatrix,
    concernClusters,
    resubmissionReadiness,
    editorSummary: {
      responseCount: responseMatrix.length,
      highRiskResponses,
      recommendedNextAction
    }
  };
}
Loading