diff --git a/backend/api/modules/declarative_router.py b/backend/api/modules/declarative_router.py
index 2092865..7d1167a 100644
--- a/backend/api/modules/declarative_router.py
+++ b/backend/api/modules/declarative_router.py
@@ -14,6 +14,7 @@
from backend.celonis_connection.celonis_connection_manager import (
CelonisConnectionManager,
)
+from backend.pql_queries import declarative_queries
# **************** Type Aliases ****************
@@ -33,6 +34,7 @@ async def compute_declarative_constraints(
request: Request,
min_support: float = Query(0.3, description="Minimum support ratio"),
min_confidence: float = Query(0.75, description="Minimum confidence ratio"),
+ fitness_score: float = Query(1.0, description="Fitness score for the constraints"),
celonis: CelonisConnectionManager = Depends(get_celonis_connection),
) -> Dict[str, str]:
"""Computes the declarative constraints and stores it.
@@ -48,6 +50,7 @@ async def compute_declarative_constraints(
Defaults to Depends(get_celonis_connection).
min_support: The minimum support ratio for the constraints.
min_confidence: The minimum confidence ratio for the constraints.
+ fitness_score: The fitness score for the constraints.
Returns:
A dictionary containing the job ID of the scheduled task.
@@ -65,12 +68,13 @@ async def compute_declarative_constraints(
celonis,
min_support,
min_confidence,
+ fitness_score,
)
return {"job_id": job_id}
-# **************** Retrieving Declarative Model Attributes ****************
+# **************** Retrieving Declarative Model Attributes - PM4PY ****************
@router.get("/get_existance_violations/{job_id}")
@@ -363,3 +367,42 @@ def get_nonchainsuccession_violations(job_id: str, request: Request) -> ReturnGr
verify_correct_job_module(job_id, request, MODULE_NAME)
return request.app.state.jobs[job_id].result.get("nonchainsuccession", [])
+
+
+# **************** Retrieving Declarative Model Attributes - PQL Queries ****************
+
+
+@router.get("/get_always_after_pql/")
+def get_always_after_pql(
+ request: Request,
+ celonis: CelonisConnectionManager = Depends(get_celonis_connection),
+) -> Dict[str, Union[List[TableType], List[GraphType]]]:
+ """Retrieves the always-after relations via PQL.
+
+ Args:
+ request: The FastAPI request object.
+ celonis: The CelonisManager dependency injection.
+
+ Returns:
+ A JSON object with "tables" and "graphs" keys.
+ """
+ result_df = declarative_queries.get_always_after_relation(celonis)
+ return result_df
+
+
+@router.get("/get_always_before_pql/")
+def get_always_before_pql(
+ request: Request,
+ celonis: CelonisConnectionManager = Depends(get_celonis_connection),
+) -> Dict[str, Union[List[TableType], List[GraphType]]]:
+ """Retrieves the always-before relations via PQL.
+
+ Args:
+ request: The FastAPI request object.
+ celonis: The CelonisManager dependency injection.
+
+ Returns:
+ A JSON object with "tables" and "graphs" keys.
+ """
+ result_df = declarative_queries.get_always_before_relation(celonis)
+ return result_df
diff --git a/backend/api/tasks/declarative_constraints_tasks.py b/backend/api/tasks/declarative_constraints_tasks.py
index f2072a2..e83c605 100644
--- a/backend/api/tasks/declarative_constraints_tasks.py
+++ b/backend/api/tasks/declarative_constraints_tasks.py
@@ -15,6 +15,7 @@ def compute_and_store_declarative_constraints(
celonis: CelonisConnectionManager,
min_support_ratio: float = 0.3,
min_confidence_ratio: float = 0.75,
+ fitness_score: float = 1.0,
) -> None:
"""Computes the declarative constraints and stores it in the app state.
@@ -24,6 +25,7 @@ def compute_and_store_declarative_constraints(
celonis: The CelonisConnectionManager instance.
min_support_ratio: The minimum support ratio for the constraints.
min_confidence_ratio: The minimum confidence ratio for the constraints.
+ fitness_score: The fitness score for the constraints.
"""
# Get the job record from the app state
rec: JobStatus = app.state.jobs[job_id]
@@ -42,6 +44,7 @@ def compute_and_store_declarative_constraints(
rec.result = dc.update_model_and_run_all_rules(
min_support_ratio=min_support_ratio,
min_confidence_ratio=min_confidence_ratio,
+ fitness_score=fitness_score,
)
rec.status = "complete"
diff --git a/backend/conformance_checking/declarative_constraints.py b/backend/conformance_checking/declarative_constraints.py
index 7f8be4a..f992065 100644
--- a/backend/conformance_checking/declarative_constraints.py
+++ b/backend/conformance_checking/declarative_constraints.py
@@ -34,6 +34,7 @@ def __init__(
log: pd.DataFrame,
min_support_ratio: Optional[float] = 0.3,
min_confidence_ratio: Optional[float] = 0.75,
+ fitness_score: Optional[float] = 1.0,
case_id_col: Optional[str] = None,
activity_col: Optional[str] = None,
timestamp_col: Optional[str] = None,
@@ -48,6 +49,8 @@ def __init__(
Defaults to 0.3.
min_confidence_ratio: The minimum confidence ratio for discovering rules.
Defaults to 0.75.
+ fitness_score: The fitness score threshold for conformance checking.
+ Defaults to 1.0.
case_id_col : The name of the column containing case IDs.
activity_col : The name of the column containing activity names.
timestamp_col : The name of the column containing timestamps.
@@ -55,6 +58,7 @@ def __init__(
self.log = log
self.min_support_ratio = min_support_ratio
self.min_confidence_ratio = min_confidence_ratio
+ self.fitness_score = fitness_score
self.declare_model: Optional[DeclareModelType] = None
self.case_id_col: Optional[str] = case_id_col
self.activity_col: Optional[str] = activity_col
@@ -91,6 +95,7 @@ def run_model(
log: Optional[pd.DataFrame] = None,
min_support_ratio: Optional[float] = None,
min_confidence_ratio: Optional[float] = None,
+ fitness_score: Optional[float] = None,
) -> None:
"""Runs the declarative model on the event log.
@@ -100,6 +105,7 @@ def run_model(
log: The event log to use.
min_support_ratio: The minimum support ratio for discovering rules.
min_confidence_ratio: The minimum confidence ratio for discovering rules.
+ fitness_score: The fitness score threshold for conformance checking.
"""
if log is None:
log = self.log
@@ -107,6 +113,8 @@ def run_model(
min_support_ratio = self.min_support_ratio
if min_confidence_ratio is None:
min_confidence_ratio = self.min_confidence_ratio
+ if fitness_score is not None:
+ self.fitness_score = fitness_score
self.declare_model = pm4py.discover_declare( # type: ignore
log,
min_support_ratio=min_support_ratio,
@@ -121,6 +129,7 @@ def rule_specific_violation_summary(
declare_model: Optional[DeclareModelType] = None,
log: Optional[pd.DataFrame] = None,
rule_name: Optional[str] = None,
+ fitness_score: Optional[float] = None,
verbose: bool = False,
) -> ReturnGraphType:
"""Summarizes number of violations for a declarative rule.
@@ -133,6 +142,7 @@ def rule_specific_violation_summary(
log: The event log. If None, uses the default log.
rule_name: Name of the rule to check.
verbose: Whether to print details for debugging.
+ fitness_score: The fitness score threshold for conformance checking.
Returns:
Summary with graph and table information of rule violations.
@@ -146,11 +156,15 @@ def rule_specific_violation_summary(
declare_model = self.declare_model
if log is None:
log = self.log
+ if fitness_score is not None:
+ self.fitness_score = fitness_score
if str(rule_name) not in self.valid_rules:
raise ValueError(
f"Unsupported rule: '{rule_name}'. Must be one of: {self.valid_rules}"
)
+ if self.fitness_score is None:
+ self.fitness_score = 1.0
if declare_model is None:
raise ValueError("Declare model is stil None. Something has gone wrong.")
@@ -171,7 +185,9 @@ def rule_specific_violation_summary(
else:
A, B = rule_key, None # type: ignore
diagnostics = decl_conf.apply(log, {rule_name: {(A, B): rule_info}}) # type: ignore
- violated = [d for d in diagnostics if d["dev_fitness"] < 1.0] # type: ignore
+ violated = [
+ d for d in diagnostics if d["dev_fitness"] < self.fitness_score
+ ] # type: ignore
violation_count = len(violated) # type: ignore
if violation_count > 0:
@@ -410,6 +426,7 @@ def update_model_and_run_all_rules(
log: Optional[pd.DataFrame] = None,
min_support_ratio: Optional[float] = None,
min_confidence_ratio: Optional[float] = None,
+ fitness_score: Optional[float] = 1.0,
list_of_rules: Optional[List[str]] = None,
run_from_scratch: Optional[bool] = False,
) -> Any:
@@ -423,6 +440,7 @@ def update_model_and_run_all_rules(
valid rules.
run_from_scratch: If True, re-evaluates all rules even if results
stored.
+ fitness_score: The fitness score threshold for conformance checking.
Returns:
Dictionary of all violations.
@@ -435,11 +453,14 @@ def update_model_and_run_all_rules(
min_confidence_ratio = self.min_confidence_ratio
if list_of_rules is None:
list_of_rules = self.valid_rules
+ if fitness_score is not None:
+ self.fitness_score = fitness_score
self.run_model(
log=log,
min_support_ratio=min_support_ratio,
min_confidence_ratio=min_confidence_ratio,
+ fitness_score=fitness_score,
)
for rule in list_of_rules:
self.temp = self.get_declarative_conformance_diagnostics(
diff --git a/backend/pql_queries/declarative_queries.py b/backend/pql_queries/declarative_queries.py
new file mode 100644
index 0000000..483b2b6
--- /dev/null
+++ b/backend/pql_queries/declarative_queries.py
@@ -0,0 +1,161 @@
+"""Queries used to get declarative-constraint related data from celonis."""
+
+from itertools import combinations
+from typing import Dict, List, TypeAlias, Union, Any
+
+from pandas import DataFrame
+import pandas as pd
+
+from backend.celonis_connection.celonis_connection_manager import (
+ CelonisConnectionManager,
+)
+from backend.pql_queries.general_queries import get_activities
+
+# **************** Type Aliases ****************
+
+# TableType: TypeAlias = Dict[str, Union[List[str], List[List[str]]]]
+# GraphType: TypeAlias = Dict[str, List[Dict[str, str]]]
+TableType: TypeAlias = Dict[str, Any]
+GraphType: TypeAlias = Dict[str, Any]
+ReturnGraphType: TypeAlias = Dict[str, Union[List[TableType], List[GraphType]]]
+
+# **************** Formatting Function ****************
+
+
+def format_graph_and_table(curr_df: pd.DataFrame) -> ReturnGraphType:
+ """Formats the DataFrame into a graph and table structure.
+
+ Args:
+ curr_df (pd.DataFrame): The DataFrame to format.
+
+ Returns:
+ ReturnGraphType: A dictionary containing the formatted graph and table.
+ """
+ output: ReturnGraphType = {"graphs": [], "tables": []}
+
+ if not curr_df.empty:
+ if curr_df.shape[1] == 3:
+ nodes = []
+ edges = []
+ for i, row in curr_df.iterrows(): # type: ignore
+ nodes.append(str(row[curr_df.columns[0]])) # type: ignore
+ nodes.append(str(row[curr_df.columns[1]])) # type: ignore
+ edges.append(
+ { # type: ignore
+ "from": str(row[curr_df.columns[0]]), # type: ignore
+ "to": str(row[curr_df.columns[1]]), # type: ignore
+ "label": str(row[curr_df.columns[2]]), # type: ignore
+ }
+ )
+
+ nodes = [{"id": str(ele)} for ele in list(set(list(nodes)))] # type: ignore
+ output["graphs"].append(
+ {
+ "nodes": nodes, # type: ignore
+ "edges": edges,
+ }
+ )
+
+ headers = list(curr_df.columns)
+ rows = curr_df.values.tolist() # type: ignore
+ output["tables"].append(
+ {
+ "headers": headers, # type: ignore
+ "rows": [[str(ele) for ele in row] for row in rows], # type: ignore
+ }
+ )
+ else:
+ headers = list(curr_df.columns)
+ rows = curr_df.values.tolist() # type: ignore
+ output["tables"].append(
+ {
+ "headers": headers, # type: ignore
+ "rows": [[str(ele) for ele in row] for row in rows], # type: ignore
+ }
+ )
+ return output
+
+
+# **************** PQL Functions ****************
+
+
+# Always before
+def get_always_before_relation(celonis: CelonisConnectionManager) -> ReturnGraphType:
+ """Compute Always-Before summary using PQL.
+
+ Args:
+ celonis (CelonisConnectionManager): the celonis connection
+
+ Returns:
+ ReturnGraphType: A dictionary containing the formatted graph and table.
+ """
+ target_df: pd.DataFrame = DataFrame(
+ columns=["Activity A", "Activity B", "# Occurrences"]
+ )
+ act_table = get_activities(celonis) # type: ignore
+ activitiy_pairs = list(combinations(act_table["Activity"].to_list(), 2)) # type: ignore
+ for i, pair in enumerate(activitiy_pairs): # type: ignore
+ query = {
+ "A before B": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[0]}'] as src,
+ NODE ['{pair[1]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
+ "B before A": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[1]}'] as src,
+ NODE ['{pair[0]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
+ }
+ pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
+ if (pair_df["B before A"] == 1).any() and not (
+ pair_df["A before B"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[1],
+ pair[0],
+ int((pair_df["B before A"] == 1).sum()),
+ ] # type: ignore
+ elif (pair_df["A before B"] == 1).any() and not (
+ pair_df["B before A"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[0],
+ pair[1],
+ int((pair_df["A before B"] == 1).sum()),
+ ] # type: ignore
+ output = format_graph_and_table(target_df)
+ return output
+
+
+# Always after
+def get_always_after_relation(celonis: CelonisConnectionManager) -> ReturnGraphType:
+ """Compute Always-After summary using PQL.
+
+ Args:
+ celonis (CelonisConnectionManager): the celonis connection
+
+ Returns:
+ ReturnGraphType: A dictionary containing the formatted graph and table.
+ """
+ target_df = DataFrame(columns=["Activity A", "Activity B", "# Occurrences"])
+ act_table = get_activities(celonis)
+ activitiy_pairs = list(combinations(act_table["Activity"].to_list(), 2)) # type: ignore
+ for i, pair in enumerate(activitiy_pairs): # type: ignore
+ query = {
+ "A after B": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[1]}'] as src,
+ NODE ['{pair[0]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
+ "B after A": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[0]}'] as src,
+ NODE ['{pair[1]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
+ }
+ pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
+ if (pair_df["B after A"] == 1).any() and not (pair_df["A after B"] == 1).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[1],
+ pair[0],
+ int((pair_df["B after A"] == 1).sum()),
+ ] # type: ignore
+ elif (pair_df["A after B"] == 1).any() and not (
+ pair_df["B after A"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[0],
+ pair[1],
+ int((pair_df["A after B"] == 1).sum()),
+ ] # type: ignore
+ output = format_graph_and_table(target_df)
+ return output
diff --git a/backend/pql_queries/declerative_queries.py b/backend/pql_queries/declerative_queries.py
deleted file mode 100644
index 79ae0ec..0000000
--- a/backend/pql_queries/declerative_queries.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Queries used to get declerative constraint related data from celonis."""
diff --git a/frontend/src/ArrowGraph.js b/frontend/src/ArrowGraph.js
new file mode 100644
index 0000000..4b18c41
--- /dev/null
+++ b/frontend/src/ArrowGraph.js
@@ -0,0 +1,225 @@
+import React, { useEffect, useRef } from "react";
+import * as d3 from "d3";
+
+const ArrowGraph = ({ graphData }) => {
+ const svgRef = useRef();
+
+ useEffect(() => {
+ const width = 900;
+ const height = 600;
+ const padding = 50;
+ const nodeRadius = 38;
+
+ const svg = d3.select(svgRef.current);
+ svg.selectAll("*").remove();
+
+ const zoomGroup = svg.append("g");
+
+ svg.call(
+ d3.zoom().on("zoom", (event) => {
+ zoomGroup.attr("transform", event.transform);
+ })
+ );
+
+ const { nodes = [], edges = [] } = graphData;
+
+ const colorScale = d3.scaleOrdinal(d3.schemeCategory10);
+ const nodeColorMap = {};
+ nodes.forEach((n, i) => {
+ nodeColorMap[n.id] = colorScale(i);
+ });
+
+ const nodeById = new Map(nodes.map((n) => [n.id, n]));
+
+ const edgeMap = new Map();
+ edges.forEach((e) => {
+ const key = `${e.from}|||${e.to}`;
+ const label = String(e.label);
+ if (edgeMap.has(key)) {
+ edgeMap.get(key).labels.push(label);
+ } else {
+ edgeMap.set(key, {
+ from: e.from,
+ to: e.to,
+ labels: [label],
+ });
+ }
+ });
+
+ const d3Edges = Array.from(edgeMap.values())
+ .map((e) => ({
+ source: nodeById.get(e.from),
+ target: nodeById.get(e.to),
+ label: e.labels.join(", "),
+ weight: e.labels.length,
+ color: nodeColorMap[e.from] || "#ccc",
+ }))
+ .filter((e) => e.source && e.target);
+
+ // Marker for arrows — refX must match the shortened line offset
+ svg
+ .append("defs")
+ .append("marker")
+ .attr("id", "arrowhead")
+ .attr("viewBox", "0 -5 10 10")
+ .attr("refX", 10) // arrowhead offset relative to the shortened line
+ .attr("refY", 0)
+ .attr("markerWidth", 6)
+ .attr("markerHeight", 6)
+ .attr("orient", "auto")
+ .append("path")
+ .attr("d", "M0,-5L10,0L0,5")
+ .attr("fill", "#555");
+
+ // Initialize node positions
+ nodes.forEach((node) => {
+ node.x = Math.random() * (width - 2 * padding) + padding;
+ node.y = Math.random() * (height - 2 * padding) + padding;
+ });
+
+ const simulation = d3
+ .forceSimulation(nodes)
+ .force(
+ "link",
+ d3
+ .forceLink(d3Edges)
+ .id((d) => d.id)
+ .distance(220)
+ )
+ .force("charge", d3.forceManyBody().strength(-600))
+ .force("center", d3.forceCenter(width / 2, height / 2));
+
+ const link = zoomGroup
+ .append("g")
+ .attr("stroke-opacity", 0.6)
+ .selectAll("line")
+ .data(d3Edges)
+ .enter()
+ .append("line")
+ .attr("stroke-width", (d) => Math.max(1.5, Math.min(8, d.weight)))
+ .attr("stroke", (d) => d.color)
+ .attr("marker-end", "url(#arrowhead)");
+
+ const edgeLabel = zoomGroup
+ .append("g")
+ .selectAll("text")
+ .data(d3Edges)
+ .enter()
+ .append("text")
+ .attr("font-size", 10)
+ .attr("fill", "#333")
+ .attr("text-anchor", "middle")
+ .append("tspan")
+ .text((d) => d.label);
+
+ const nodeGroup = zoomGroup
+ .append("g")
+ .selectAll("g")
+ .data(nodes)
+ .enter()
+ .append("g")
+ .call(
+ d3.drag().on("start", dragStart).on("drag", dragged).on("end", dragEnd)
+ );
+
+ nodeGroup
+ .append("circle")
+ .attr("r", nodeRadius)
+ .attr("fill", (d) => nodeColorMap[d.id])
+ .attr("stroke", "#333")
+ .attr("stroke-width", 1.5);
+
+ nodeGroup.append("title").text((d) => d.id);
+
+ nodeGroup
+ .append("text")
+ .attr("text-anchor", "middle")
+ .attr("dy", 0)
+ .attr("font-size", 10)
+ .attr("fill", "#fff")
+ .style("pointer-events", "none")
+ .selectAll("tspan")
+ .data((d) =>
+ d.id.length > 12
+ ? [d.id.slice(0, d.id.length / 2), d.id.slice(d.id.length / 2)]
+ : [d.id]
+ )
+ .enter()
+ .append("tspan")
+ .attr("x", 0)
+ .attr("dy", (d, i) => (i === 0 ? 0 : 12))
+ .text((d) => d);
+
+ simulation.on("tick", () => {
+ nodeGroup.attr("transform", (d) => {
+ d.x = Math.max(padding, Math.min(width - padding, d.x));
+ d.y = Math.max(padding, Math.min(height - padding, d.y));
+ return `translate(${d.x},${d.y})`;
+ });
+
+ link
+ .attr("x1", (d) => d.source.x)
+ .attr("y1", (d) => d.source.y)
+ .attr("x2", (d) => {
+ const dx = d.target.x - d.source.x;
+ const dy = d.target.y - d.source.y;
+ const dist = Math.sqrt(dx * dx + dy * dy);
+ return d.target.x - (dx * (nodeRadius + 5)) / dist;
+ })
+ .attr("y2", (d) => {
+ const dx = d.target.x - d.source.x;
+ const dy = d.target.y - d.source.y;
+ const dist = Math.sqrt(dx * dx + dy * dy);
+ return d.target.y - (dy * (nodeRadius + 5)) / dist;
+ });
+
+ // Track label counts between same source-target pairs
+ const labelOffsets = new Map();
+ edgeLabel
+ .attr("x", (d) => (d.source.x + d.target.x) / 2)
+ .attr("y", function (d) {
+ const key = `${d.source.id}|||${d.target.id}`;
+ const count = labelOffsets.get(key) || 0;
+ labelOffsets.set(key, count + 1);
+
+ // Alternate label positions around the line
+ const baseY = (d.source.y + d.target.y) / 2;
+ const offset = (count - 1) * 12;
+ return baseY + (count % 2 === 0 ? offset : -offset);
+ });
+ });
+
+ setTimeout(() => simulation.stop(), 3000);
+
+ function dragStart(event, d) {
+ if (!event.active) simulation.alphaTarget(0.3).restart();
+ d.fx = d.x;
+ d.fy = d.y;
+ }
+
+ function dragged(event, d) {
+ d.fx = event.x;
+ d.fy = event.y;
+ }
+
+ function dragEnd(event, d) {
+ if (!event.active) simulation.alphaTarget(0);
+ }
+ }, [graphData]);
+
+  return (
+    <svg ref={svgRef} width={900} height={600} />
+  );
+};
+
+export default ArrowGraph;
diff --git a/frontend/src/ResultsPage.js b/frontend/src/ResultsPage.js
index 0adb567..e32f2dc 100644
--- a/frontend/src/ResultsPage.js
+++ b/frontend/src/ResultsPage.js
@@ -16,6 +16,10 @@ import {
import Graph from "./Graph";
import Table from "./Table";
+import ArrowGraph from "./ArrowGraph";
+import Tooltip from "@mui/material/Tooltip";
+import IconButton from "@mui/material/IconButton";
+import InfoIcon from "@mui/icons-material/Info";
import {
GET_GENERAL_INSIGHTS,
@@ -41,7 +45,9 @@ import {
GET_RESPONDED_EXISTENCE_VIOLATIONS,
GET_COEXISTENCE_VIOLATIONS,
GET_RESPONSE_VIOLATIONS,
+ GET_DECL_ALWAYS_AFTER_PQL,
GET_PRECEDENCE_VIOLATIONS,
+ GET_DECL_ALWAYS_BEFORE_PQL,
GET_SUCCESSION_VIOLATIONS,
GET_ALTPRECEDENCE_VIOLATIONS,
GET_ALTSUCCESION_VIOLATIONS,
@@ -141,6 +147,23 @@ const LOG_SKELETON_OPTIONS = [
},
];
+// -------------------- Log Skeleton Descriptions --------------------
+const logSkeletonDescriptions = {
+ "Get Equivalence": "Pairs of activities, where both activities occur equally often in every trace.",
+ "Get Equivalence (PQL)":
+ "Pairs of activities, where both activities occur equally often in every trace using PQL queries.",
+ "Always Before": "Pairs of activities, where the first activity always occurs before the second one.",
+ "Always Before (PQL)": "Pairs of activities, where the first activity always occurs before the second one using PQL queries.",
+ "Always After": "Pairs of activities, where the second activity always occurs after the first one.",
+ "Always After (PQL)": "Pairs of activities, where the second activity always occurs after the first one using PQL queries.",
+  "Never Together": "Pairs of activities that do not occur together in any trace.",
+ "Never Together (PQL)": "Pairs of activities that do not occur together in any trace using PQL queries.",
+ "Directly Follows": "Pairs of activities, where the first activity can be followed by the second one.",
+ "Activity Frequencies": "Counts how frequently each activity occurs.",
+ "Directly Follows and Count (PQL)":
+ "Pairs of activities, where the first activity can be followed by the second one. The count of occurrences is also provided using PQL queries.",
+};
+
// --------------------Declarative Constraints Options --------------------
const DECLARATIVE_OPTIONS = [
{ label: "Existence", endpoint: GET_EXISTANCE_VIOLATIONS },
@@ -153,7 +176,9 @@ const DECLARATIVE_OPTIONS = [
},
{ label: "Co-Existence", endpoint: GET_COEXISTENCE_VIOLATIONS },
{ label: "Always After", endpoint: GET_RESPONSE_VIOLATIONS },
+ { label: "Always After (PQL)", endpoint: GET_DECL_ALWAYS_AFTER_PQL },
{ label: "Always Before", endpoint: GET_PRECEDENCE_VIOLATIONS },
+ { label: "Always Before (PQL)", endpoint: GET_DECL_ALWAYS_BEFORE_PQL },
{ label: "Succession", endpoint: GET_SUCCESSION_VIOLATIONS },
{
label: "Alternate Precedence",
@@ -186,6 +211,43 @@ const DECLARATIVE_OPTIONS = [
},
];
+// -------------------- Declarative Constraints Descriptions --------------------
+const declarativeDescriptions = {
+ Existence:
+ "Ensures that a particular activity occurs at least once in a trace.",
+ Never: "Specifies that a particular activity must not occur in a trace.",
+ "Exactly Once": "Restricts an activity to occur exactly one time per trace.",
+ Initially: "Requires that a specific activity is the first in every trace.",
+ "Responded Existence":
+ "If Activity A occurs, then Activity B must also occur somewhere in the trace.",
+ "Co-Existence":
+ "Activities A and B must either both occur or both be absent in a trace.",
+ "Always After":
+ "If Activity A occurs, Activity B must follow it at some point.",
+ "Always After (PQL)":
+ "If Activity A occurs, Activity B must follow it at some point using PQL queries.",
+ "Always Before":
+ "If Activity B occurs, Activity A must have occurred before it.",
+ "Always Before (PQL)":
+ "If Activity B occurs, Activity A must have occurred before it using PQL queries.",
+ Succession:
+ "If Activity A occurs, then Activity B must occur afterwards, and vice versa.",
+ "Alternate Precedence":
+ "Every occurrence of Activity B must be preceded by exactly one occurrence of Activity A.",
+ "Alternate Succession":
+ "Every occurrence of Activity A must be followed by exactly one occurrence of Activity B.",
+ "Immediately After":
+ "Activity B must directly follow Activity A whenever A occurs.",
+ "Immediately Before":
+ "Activity A must directly precede Activity B whenever B occurs.",
+ "Chain Succession":
+ "Every occurrence of Activity A must be immediately followed by B, and every B must be preceded by A.",
+ "Non Co-Existence":
+ "Activities A and B cannot both appear in the same trace.",
+ "Non Succession": "Activity A should never be followed by Activity B.",
+ "Non Chain Succession": "Activity B must not immediately follow Activity A.",
+};
+
// -------------------- Resource Options --------------------
const resourceOptions = {
@@ -224,6 +286,64 @@ const resourceOptions = {
],
};
+// -------------------- Resource Descriptions --------------------
+const resourceDescriptions = {
+ "Handover of Work":
+ "The Handover of Work metric measures how many times an individual is followed by another individual in the execution of a business process.",
+ Subcontracting:
+ "The Subcontracting metric calculates how many times the work of an individual is interleaved by the work of another individual, only to eventually “return” to the original individual.",
+ "Working together":
+ "The Working Together metric calculates how many times two individuals work together to resolve a process instance.",
+ "Similar Activities":
+ "The Similar Activities metric calculates how similar the work patterns are between two individuals.",
+ "Role Discovery":
+ "The organizational role is a set of activities in the log that are executed by a similar (multi)set of resources.",
+ "Group Relative Focus":
+ "The Group Relative Focus metric specifies for a given work how much a resource group performed this type of work compared to the overall workload of the group. It can be used to measure how the workload of a resource group is distributed over different types of work, i.e., work diversification of the group.",
+ "Group Relative Stake":
+ "The Group Relative Stake metric specifies for a given work how much this type of work was performed by a certain resource group among all groups. It can be used to measure how the workload devoted to a certain type of work is distributed over resource groups in an organizational model, i.e., work participation by different groups.",
+ "Group Coverage":
+ "The Group Coverage metric with respect to a given type of work, specifies the proportion of members of a resource group that performed this type of work.",
+ "Group Member Contributions":
+ "The Group Member Contribution metric of a member of a resource group with respect to a given type of work specifies how much of this type of work by the group was performed by the member. It can be used to measure how the workload of the entire group devoted to a certain type of work is distributed over the group members.",
+ "Distinct Activities":
+ "Number of distinct activities done by a resource in a given time interval [t1, t2).",
+ "Distinct Activities (using PQL)":
+ "Number of distinct activities done by a resource in a given time interval [t1, t2) using PQL Queries.",
+ "Activity Frequency":
+ "Fraction of completions of a given activity a by a given resource r during a given time slot [t1, t2), with respect to the total number of activity completions by resource r during [t1, t2).",
+ "Activity Frequency (using PQL)":
+ "Fraction of completions of a given activity a by a given resource r during a given time slot [t1, t2), with respect to the total number of activity completions by resource r during [t1, t2) using PQL queries.",
+ "Activity Completions":
+ "The number of activity instances completed by a given resource during a given time slot.",
+ "Activity Completions (using PQL)":
+ "The number of activity instances completed by a given resource during a given time slot using PQL queries.",
+ "Case-Completions":
+ "The number of cases completed during a given time slot in which a given resource was involved.",
+ "Case-Completions (using PQL)":
+ "The number of cases completed during a given time slot in which a given resource was involved using PQL queries.",
+ "Fraction-Case Completions":
+ "The fraction of cases completed during a given time slot in which a given resource was involved with respect to the total number of cases completed during the time slot.",
+ "Fraction-Case Completions (using PQL)":
+ "The fraction of cases completed during a given time slot in which a given resource was involved with respect to the total number of cases completed during the time slot using PQL queries.",
+ "Average workload":
+ "The average number of activities started by a given resource but not completed at a moment in time.",
+ "Average workload (using PQL)":
+ "The average number of activities started by a given resource but not completed at a moment in time using PQL queries.",
+ Multitasking:
+ "The fraction of active time during which a given resource is involved in more than one activity with respect to the resource's active time.",
+ "Average Activity Duration":
+ "The average duration of instances of a given activity completed during a given time slot by a given resource.",
+ "Average case duration":
+ "The average duration of cases completed during a given time slot in which a given resource was involved.",
+ "Interaction Two Resources":
+ "The number of cases completed during a given time slot in which two given resources were involved.",
+ "Interaction Two Resources (using PQL)":
+ "The number of cases completed during a given time slot in which two given resources were involved using PQL queries.",
+ "Social Position":
+ "The fraction of resources involved in the same cases with a given resource during a given time slot with respect to the total number of resources active during the time slot.",
+};
+
// -------------------- Main Component --------------------
const ResultsPage = () => {
@@ -250,6 +370,7 @@ const ResultsPage = () => {
// Declarative Constraints
const [minSupport, setMinSupport] = useState("");
const [minConfidence, setMinConfidence] = useState("");
+ const [zetaValue, setZetaValue] = useState("");
const [declJobId, setDeclJobId] = useState(null);
const [selectedDeclOption, setSelectedDeclOption] = useState("");
const [declLoading, setDeclLoading] = useState(false);
@@ -452,9 +573,14 @@ const ResultsPage = () => {
}
try {
- const url = `${COMPUTE_DECLARATIVE_CONSTRAINTS}?min_support=${parseFloat(
- minSupport
- )}&min_confidence=${parseFloat(minConfidence)}`;
+ const queryParams = new URLSearchParams({
+ min_support: parseFloat(minSupport),
+ min_confidence: parseFloat(minConfidence),
+ });
+ if (zetaValue) {
+ queryParams.append("fitness_score", parseFloat(zetaValue));
+ }
+ const url = `${COMPUTE_DECLARATIVE_CONSTRAINTS}?${queryParams.toString()}`;
const res = await fetch(url, { method: "GET" });
const data = await res.json();
setDeclJobId(data.job_id);
@@ -467,7 +593,8 @@ const ResultsPage = () => {
};
const handleDeclarativeOptionSelect = async (option) => {
- if (!declJobId) {
+ const isPQL = option.label.endsWith("(PQL)");
+ if (!isPQL && !declJobId) {
alert("Please compute constraints first.");
return;
}
@@ -480,8 +607,11 @@ const ResultsPage = () => {
try {
let attempts = 0;
let resultData = null;
+ const endpoint = option.endpoint;
+ const fetchURL = isPQL ? endpoint : `${endpoint}/${declJobId}`;
+
while (attempts < 20) {
- const res = await fetch(`${option.endpoint}/${declJobId}`);
+ const res = await fetch(fetchURL);
if (res.ok) {
resultData = await res.json();
break;
@@ -652,18 +782,32 @@ const ResultsPage = () => {
const renderGraphAndTable = () => (
<>
- {graphData.map((graph, idx) => (
-
- Graph {idx + 1}
-
-
- ))}
+ {graphData.map((graph, idx) => {
+ const useArrowGraph = [
+ "get_always_before_pql",
+ "get_always_after_pql",
+ "get_directly_follows_and_count",
+ ].includes(selectedOption);
+
+        return (
+          <div key={idx}>
+            <h4>Graph {idx + 1}</h4>
+            {useArrowGraph ? (
+              <ArrowGraph graphData={graph} />
+            ) : (
+              <Graph graphData={graph} />
+            )}
+          </div>
+        );
+ })}
+
{tableData.map((table, idx) => (
Table {idx + 1}