diff --git a/backend/api/modules/declarative_router.py b/backend/api/modules/declarative_router.py
index b8e35a0..f11f9d5 100644
--- a/backend/api/modules/declarative_router.py
+++ b/backend/api/modules/declarative_router.py
@@ -369,7 +369,7 @@ def get_nonchainsuccession_violations(job_id: str, request: Request) -> ReturnGr
return request.app.state.jobs[job_id].result.get("nonchainsuccession", [])
-# **************** Retrieving Declarative Model Attributes - PM4PY ****************
+# **************** Retrieving Declarative Model Attributes - PQL Queries ****************
@router.get("/get_always_after_pql/")
diff --git a/backend/pql_queries/declarative_queries.py b/backend/pql_queries/declarative_queries.py
index 9993b0f..a69219b 100644
--- a/backend/pql_queries/declarative_queries.py
+++ b/backend/pql_queries/declarative_queries.py
@@ -19,7 +19,8 @@
# **************** Formatting Function ****************
-def format_graph_and_table (curr_df: pd.DataFrame) -> ReturnGraphType:
+
+def format_graph_and_table(curr_df: pd.DataFrame) -> ReturnGraphType:
"""Formats the DataFrame into a graph and table structure.
Args:
@@ -28,44 +29,55 @@ def format_graph_and_table (curr_df: pd.DataFrame) -> ReturnGraphType:
Returns:
ReturnGraphType: A dictionary containing the formatted graph and table.
"""
- output: ReturnGraphType = {"graphs": [], "tables": []}
- if not curr_df.empty :
- if curr_df.shape[1] == 3 :
+ output: ReturnGraphType = {"graphs": [], "tables": []}
+
+ if not curr_df.empty:
+ if curr_df.shape[1] == 3:
nodes = []
edges = []
- for i, row in curr_df.iterrows(): # type: ignore
+ for i, row in curr_df.iterrows(): # type: ignore
nodes.append(str(row[curr_df.columns[0]])) # type: ignore
nodes.append(str(row[curr_df.columns[1]])) # type: ignore
- edges.append({ # type: ignore
- 'from': str(row[curr_df.columns[0]]), # type: ignore
- 'to': str(row[curr_df.columns[1]]), # type: ignore
- 'label': str(row[curr_df.columns[2]]) # type: ignore
- })
-
- nodes = [{'id': str(ele)} for ele in list(set(list(nodes)))] # type: ignore
- output["graphs"].append({
- "nodes": nodes, # type: ignore
- "edges": edges
- })
-
- headers = list(curr_df.columns)
- rows = curr_df.values.tolist() # type: ignore
- output["tables"].append({
- "headers": headers, # type: ignore
- "rows": [[str(ele) for ele in row] for row in rows] # type: ignore
- })
- else :
- headers = list(curr_df.columns)
- rows = curr_df.values.tolist() # type: ignore
- output["tables"].append({
- "headers": headers, # type: ignore
- "rows": [[str(ele) for ele in row] for row in rows] # type: ignore
- })
+ edges.append(
+ { # type: ignore
+ "from": str(row[curr_df.columns[0]]), # type: ignore
+ "to": str(row[curr_df.columns[1]]), # type: ignore
+ "label": str(row[curr_df.columns[2]]), # type: ignore
+ }
+ )
+
+ nodes = [{"id": str(ele)} for ele in list(set(list(nodes)))] # type: ignore
+ output["graphs"].append(
+ {
+ "nodes": nodes, # type: ignore
+ "edges": edges,
+ }
+ )
+
+ headers = list(curr_df.columns)
+ rows = curr_df.values.tolist() # type: ignore
+ output["tables"].append(
+ {
+ "headers": headers, # type: ignore
+ "rows": [[str(ele) for ele in row] for row in rows], # type: ignore
+ }
+ )
+ else:
+ headers = list(curr_df.columns)
+ rows = curr_df.values.tolist() # type: ignore
+ output["tables"].append(
+ {
+ "headers": headers, # type: ignore
+ "rows": [[str(ele) for ele in row] for row in rows], # type: ignore
+ }
+ )
return output
+
# **************** PQL Functions ****************
+
# Always before
def get_always_before_relation(celonis: CelonisConnectionManager) -> ReturnGraphType:
"""Compute Always-Before summary using PQL.
@@ -76,23 +88,37 @@ def get_always_before_relation(celonis: CelonisConnectionManager) -> ReturnGraph
Returns:
ReturnGraphType: A dictionary containing the formatted graph and table.
"""
- target_df: pd.DataFrame = DataFrame(columns=["Activity A", "Activity B", "# Occurrences"])
- act_table = get_activities(celonis) # type: ignore
- activitiy_pairs = list(combinations(act_table["Activity"].to_list(), 2)) # type: ignore
- for i, pair in enumerate(activitiy_pairs): # type: ignore
+ target_df: pd.DataFrame = DataFrame(
+ columns=["Activity A", "Activity B", "# Occurrences"]
+ )
+ act_table = get_activities(celonis) # type: ignore
+    activity_pairs = list(combinations(act_table["Activity"].to_list(), 2))  # type: ignore
+    for i, pair in enumerate(activity_pairs):  # type: ignore
query = {
"A before B": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[0]}'] as src,
NODE ['{pair[1]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
"B before A": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[1]}'] as src,
NODE ['{pair[0]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
}
- pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
- if (pair_df["B before A"] == 1).any() and not (pair_df["A before B"] == 1).any(): # type: ignore
- target_df.loc[i] = [pair[1], pair[0], int((pair_df["B before A"] == 1).sum())] # type: ignore
- elif (pair_df["A before B"] == 1).any() and not (pair_df["B before A"] == 1).any(): # type: ignore
- target_df.loc[i] = [pair[0], pair[1], int((pair_df["A before B"] == 1).sum())] # type: ignore
- output = format_graph_and_table(target_df)
- return output
+ pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
+ if (pair_df["B before A"] == 1).any() and not (
+ pair_df["A before B"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[1],
+ pair[0],
+ int((pair_df["B before A"] == 1).sum()),
+ ] # type: ignore
+ elif (pair_df["A before B"] == 1).any() and not (
+ pair_df["B before A"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[0],
+ pair[1],
+ int((pair_df["A before B"] == 1).sum()),
+ ] # type: ignore
+ output = format_graph_and_table(target_df)
+ return output
# Always after
@@ -107,18 +133,28 @@ def get_always_after_relation(celonis: CelonisConnectionManager) -> ReturnGraphT
"""
target_df = DataFrame(columns=["Activity A", "Activity B", "# Occurrences"])
act_table = get_activities(celonis)
- activitiy_pairs = list(combinations(act_table["Activity"].to_list(), 2)) # type: ignore
- for i, pair in enumerate(activitiy_pairs): # type: ignore
+    activity_pairs = list(combinations(act_table["Activity"].to_list(), 2))  # type: ignore
+    for i, pair in enumerate(activity_pairs):  # type: ignore
query = {
"A after B": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[1]}'] as src,
NODE ['{pair[0]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
"B after A": f"""MATCH_PROCESS ("ACTIVITIES"."concept:name", NODE ['{pair[0]}'] as src,
NODE ['{pair[1]}'] as tgt CONNECTED BY EVENTUALLY [src , tgt])""",
}
- pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
- if (pair_df["B after A"] == 1).any() and not (pair_df["A after B"] == 1).any(): # type: ignore
- target_df.loc[i] = [pair[1], pair[0], int((pair_df["B after A"] == 1).sum())] # type: ignore
- elif (pair_df["A after B"] == 1).any() and not (pair_df["B after A"] == 1).any(): # type: ignore
- target_df.loc[i] = [pair[0], pair[1], int((pair_df["A after B"] == 1).sum())] # type: ignore
- output = format_graph_and_table(target_df)
- return output
\ No newline at end of file
+ pair_df = celonis.get_dataframe_from_celonis(query) # type: ignore
+ if (pair_df["B after A"] == 1).any() and not (pair_df["A after B"] == 1).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[1],
+ pair[0],
+ int((pair_df["B after A"] == 1).sum()),
+ ] # type: ignore
+ elif (pair_df["A after B"] == 1).any() and not (
+ pair_df["B after A"] == 1
+ ).any(): # type: ignore
+ target_df.loc[i] = [
+ pair[0],
+ pair[1],
+ int((pair_df["A after B"] == 1).sum()),
+ ] # type: ignore
+ output = format_graph_and_table(target_df)
+ return output
diff --git a/frontend/src/ArrowGraph.js b/frontend/src/ArrowGraph.js
new file mode 100644
index 0000000..4b18c41
--- /dev/null
+++ b/frontend/src/ArrowGraph.js
@@ -0,0 +1,225 @@
+import React, { useEffect, useRef } from "react";
+import * as d3 from "d3";
+
+const ArrowGraph = ({ graphData }) => {
+ const svgRef = useRef();
+
+ useEffect(() => {
+ const width = 900;
+ const height = 600;
+ const padding = 50;
+ const nodeRadius = 38;
+
+ const svg = d3.select(svgRef.current);
+ svg.selectAll("*").remove();
+
+ const zoomGroup = svg.append("g");
+
+ svg.call(
+ d3.zoom().on("zoom", (event) => {
+ zoomGroup.attr("transform", event.transform);
+ })
+ );
+
+ const { nodes = [], edges = [] } = graphData;
+
+ const colorScale = d3.scaleOrdinal(d3.schemeCategory10);
+ const nodeColorMap = {};
+ nodes.forEach((n, i) => {
+ nodeColorMap[n.id] = colorScale(i);
+ });
+
+ const nodeById = new Map(nodes.map((n) => [n.id, n]));
+
+ const edgeMap = new Map();
+ edges.forEach((e) => {
+ const key = `${e.from}|||${e.to}`;
+ const label = String(e.label);
+ if (edgeMap.has(key)) {
+ edgeMap.get(key).labels.push(label);
+ } else {
+ edgeMap.set(key, {
+ from: e.from,
+ to: e.to,
+ labels: [label],
+ });
+ }
+ });
+
+ const d3Edges = Array.from(edgeMap.values())
+ .map((e) => ({
+ source: nodeById.get(e.from),
+ target: nodeById.get(e.to),
+ label: e.labels.join(", "),
+ weight: e.labels.length,
+ color: nodeColorMap[e.from] || "#ccc",
+ }))
+ .filter((e) => e.source && e.target);
+
+ // Marker for arrows — refX must match the shortened line offset
+ svg
+ .append("defs")
+ .append("marker")
+ .attr("id", "arrowhead")
+ .attr("viewBox", "0 -5 10 10")
+ .attr("refX", 10) // arrowhead offset relative to the shortened line
+ .attr("refY", 0)
+ .attr("markerWidth", 6)
+ .attr("markerHeight", 6)
+ .attr("orient", "auto")
+ .append("path")
+ .attr("d", "M0,-5L10,0L0,5")
+ .attr("fill", "#555");
+
+ // Initialize node positions
+ nodes.forEach((node) => {
+ node.x = Math.random() * (width - 2 * padding) + padding;
+ node.y = Math.random() * (height - 2 * padding) + padding;
+ });
+
+ const simulation = d3
+ .forceSimulation(nodes)
+ .force(
+ "link",
+ d3
+ .forceLink(d3Edges)
+ .id((d) => d.id)
+ .distance(220)
+ )
+ .force("charge", d3.forceManyBody().strength(-600))
+ .force("center", d3.forceCenter(width / 2, height / 2));
+
+ const link = zoomGroup
+ .append("g")
+ .attr("stroke-opacity", 0.6)
+ .selectAll("line")
+ .data(d3Edges)
+ .enter()
+ .append("line")
+ .attr("stroke-width", (d) => Math.max(1.5, Math.min(8, d.weight)))
+ .attr("stroke", (d) => d.color)
+ .attr("marker-end", "url(#arrowhead)");
+
+ const edgeLabel = zoomGroup
+ .append("g")
+ .selectAll("text")
+ .data(d3Edges)
+ .enter()
+ .append("text")
+ .attr("font-size", 10)
+ .attr("fill", "#333")
+ .attr("text-anchor", "middle")
+ .append("tspan")
+ .text((d) => d.label);
+
+ const nodeGroup = zoomGroup
+ .append("g")
+ .selectAll("g")
+ .data(nodes)
+ .enter()
+ .append("g")
+ .call(
+ d3.drag().on("start", dragStart).on("drag", dragged).on("end", dragEnd)
+ );
+
+ nodeGroup
+ .append("circle")
+ .attr("r", nodeRadius)
+ .attr("fill", (d) => nodeColorMap[d.id])
+ .attr("stroke", "#333")
+ .attr("stroke-width", 1.5);
+
+ nodeGroup.append("title").text((d) => d.id);
+
+ nodeGroup
+ .append("text")
+ .attr("text-anchor", "middle")
+ .attr("dy", 0)
+ .attr("font-size", 10)
+ .attr("fill", "#fff")
+ .style("pointer-events", "none")
+ .selectAll("tspan")
+ .data((d) =>
+ d.id.length > 12
+ ? [d.id.slice(0, d.id.length / 2), d.id.slice(d.id.length / 2)]
+ : [d.id]
+ )
+ .enter()
+ .append("tspan")
+ .attr("x", 0)
+ .attr("dy", (d, i) => (i === 0 ? 0 : 12))
+ .text((d) => d);
+
+ simulation.on("tick", () => {
+ nodeGroup.attr("transform", (d) => {
+ d.x = Math.max(padding, Math.min(width - padding, d.x));
+ d.y = Math.max(padding, Math.min(height - padding, d.y));
+ return `translate(${d.x},${d.y})`;
+ });
+
+ link
+ .attr("x1", (d) => d.source.x)
+ .attr("y1", (d) => d.source.y)
+ .attr("x2", (d) => {
+ const dx = d.target.x - d.source.x;
+ const dy = d.target.y - d.source.y;
+ const dist = Math.sqrt(dx * dx + dy * dy);
+ return d.target.x - (dx * (nodeRadius + 5)) / dist;
+ })
+ .attr("y2", (d) => {
+ const dx = d.target.x - d.source.x;
+ const dy = d.target.y - d.source.y;
+ const dist = Math.sqrt(dx * dx + dy * dy);
+ return d.target.y - (dy * (nodeRadius + 5)) / dist;
+ });
+
+ // Track label counts between same source-target pairs
+ const labelOffsets = new Map();
+ edgeLabel
+ .attr("x", (d) => (d.source.x + d.target.x) / 2)
+ .attr("y", function (d) {
+ const key = `${d.source.id}|||${d.target.id}`;
+ const count = labelOffsets.get(key) || 0;
+ labelOffsets.set(key, count + 1);
+
+ // Alternate label positions around the line
+ const baseY = (d.source.y + d.target.y) / 2;
+ const offset = (count - 1) * 12;
+ return baseY + (count % 2 === 0 ? offset : -offset);
+ });
+ });
+
+ setTimeout(() => simulation.stop(), 3000);
+
+ function dragStart(event, d) {
+ if (!event.active) simulation.alphaTarget(0.3).restart();
+ d.fx = d.x;
+ d.fy = d.y;
+ }
+
+ function dragged(event, d) {
+ d.fx = event.x;
+ d.fy = event.y;
+ }
+
+ function dragEnd(event, d) {
+ if (!event.active) simulation.alphaTarget(0);
+ }
+ }, [graphData]);
+
+  return (
+    <svg ref={svgRef} width={900} height={600} />
+  );
+};
+
+export default ArrowGraph;
diff --git a/frontend/src/ResultsPage.js b/frontend/src/ResultsPage.js
index 0adb567..caaa27e 100644
--- a/frontend/src/ResultsPage.js
+++ b/frontend/src/ResultsPage.js
@@ -16,6 +16,10 @@ import {
import Graph from "./Graph";
import Table from "./Table";
+import ArrowGraph from "./ArrowGraph";
+import Tooltip from "@mui/material/Tooltip";
+import IconButton from "@mui/material/IconButton";
+import InfoIcon from "@mui/icons-material/Info";
import {
GET_GENERAL_INSIGHTS,
@@ -41,7 +45,9 @@ import {
GET_RESPONDED_EXISTENCE_VIOLATIONS,
GET_COEXISTENCE_VIOLATIONS,
GET_RESPONSE_VIOLATIONS,
+ GET_DECL_ALWAYS_AFTER_PQL,
GET_PRECEDENCE_VIOLATIONS,
+ GET_DECL_ALWAYS_BEFORE_PQL,
GET_SUCCESSION_VIOLATIONS,
GET_ALTPRECEDENCE_VIOLATIONS,
GET_ALTSUCCESION_VIOLATIONS,
@@ -141,6 +147,23 @@ const LOG_SKELETON_OPTIONS = [
},
];
+// -------------------- Log Skeleton Descriptions --------------------
+const logSkeletonDescriptions = {
+ "Get Equivalence": "Checks which activities always occur together in cases.",
+ "Get Equivalence (PQL)":
+ "PQL-based variant for identifying equivalent activities.",
+ "Always Before": "Activity A always occurs before Activity B.",
+ "Always Before (PQL)": "PQL-based rule for 'always before' relationships.",
+ "Always After": "Activity A always occurs after Activity B.",
+ "Always After (PQL)": "PQL-based rule for 'always after' relationships.",
+ "Never Together": "Detects mutually exclusive activity pairs.",
+ "Never Together (PQL)": "PQL variant for mutual exclusivity.",
+ "Directly Follows": "Identifies direct succession between activities.",
+ "Activity Frequencies": "Counts how frequently each activity occurs.",
+ "Directly Follows and Count (PQL)":
+ "Shows direct follow relationships with frequency (PQL).",
+};
+
// --------------------Declarative Constraints Options --------------------
const DECLARATIVE_OPTIONS = [
{ label: "Existence", endpoint: GET_EXISTANCE_VIOLATIONS },
@@ -153,7 +176,9 @@ const DECLARATIVE_OPTIONS = [
},
{ label: "Co-Existence", endpoint: GET_COEXISTENCE_VIOLATIONS },
{ label: "Always After", endpoint: GET_RESPONSE_VIOLATIONS },
+ { label: "Always After (PQL)", endpoint: GET_DECL_ALWAYS_AFTER_PQL },
{ label: "Always Before", endpoint: GET_PRECEDENCE_VIOLATIONS },
+ { label: "Always Before (PQL)", endpoint: GET_DECL_ALWAYS_BEFORE_PQL },
{ label: "Succession", endpoint: GET_SUCCESSION_VIOLATIONS },
{
label: "Alternate Precedence",
@@ -186,6 +211,43 @@ const DECLARATIVE_OPTIONS = [
},
];
+// -------------------- Declarative Constraints Descriptions --------------------
+const declarativeDescriptions = {
+ Existence:
+ "Ensures that a particular activity occurs at least once in a trace.",
+ Never: "Specifies that a particular activity must not occur in a trace.",
+ "Exactly Once": "Restricts an activity to occur exactly one time per trace.",
+ Initially: "Requires that a specific activity is the first in every trace.",
+ "Responded Existence":
+ "If Activity A occurs, then Activity B must also occur somewhere in the trace.",
+ "Co-Existence":
+ "Activities A and B must either both occur or both be absent in a trace.",
+ "Always After":
+ "If Activity A occurs, Activity B must follow it at some point.",
+ "Always After (PQL)":
+ "If Activity A occurs, Activity B must follow it at some point using PQL queries.",
+ "Always Before":
+ "If Activity B occurs, Activity A must have occurred before it.",
+ "Always Before (PQL)":
+ "If Activity B occurs, Activity A must have occurred before it using PQL queries.",
+ Succession:
+ "If Activity A occurs, then Activity B must occur afterwards, and vice versa.",
+ "Alternate Precedence":
+ "Every occurrence of Activity B must be preceded by exactly one occurrence of Activity A.",
+ "Alternate Succession":
+ "Every occurrence of Activity A must be followed by exactly one occurrence of Activity B.",
+ "Immediately After":
+ "Activity B must directly follow Activity A whenever A occurs.",
+ "Immediately Before":
+ "Activity A must directly precede Activity B whenever B occurs.",
+ "Chain Succession":
+ "Every occurrence of Activity A must be immediately followed by B, and every B must be preceded by A.",
+ "Non Co-Existence":
+ "Activities A and B cannot both appear in the same trace.",
+ "Non Succession": "Activity A should never be followed by Activity B.",
+ "Non Chain Succession": "Activity B must not immediately follow Activity A.",
+};
+
// -------------------- Resource Options --------------------
const resourceOptions = {
@@ -224,6 +286,64 @@ const resourceOptions = {
],
};
+// -------------------- Resource Descriptions --------------------
+const resourceDescriptions = {
+ "Handover of Work":
+ "The Handover of Work metric measures how many times an individual is followed by another individual in the execution of a business process.",
+ Subcontracting:
+ "The Subcontracting metric calculates how many times the work of an individual is interleaved by the work of another individual, only to eventually “return” to the original individual.",
+ "Working together":
+ "The Working Together metric calculates how many times two individuals work together to resolve a process instance.",
+ "Similar Activities":
+ "The Similar Activities metric calculates how similar the work patterns are between two individuals.",
+ "Role Discovery":
+ "The organizational role is a set of activities in the log that are executed by a similar (multi)set of resources.",
+ "Group Relative Focus":
+ "The Group Relative Focus metric specifies for a given work how much a resource group performed this type of work compared to the overall workload of the group. It can be used to measure how the workload of a resource group is distributed over different types of work, i.e., work diversification of the group.",
+ "Group Relative Stake":
+ "The Group Relative Stake metric specifies for a given work how much this type of work was performed by a certain resource group among all groups. It can be used to measure how the workload devoted to a certain type of work is distributed over resource groups in an organizational model, i.e., work participation by different groups.",
+ "Group Coverage":
+ "The Group Coverage metric with respect to a given type of work, specifies the proportion of members of a resource group that performed this type of work.",
+ "Group Member Contributions":
+ "The Group Member Contribution metric of a member of a resource group with respect to a given type of work specifies how much of this type of work by the group was performed by the member. It can be used to measure how the workload of the entire group devoted to a certain type of work is distributed over the group members.",
+ "Distinct Activities":
+ "Number of distinct activities done by a resource in a given time interval [t1, t2).",
+ "Distinct Activities (using PQL)":
+ "Number of distinct activities done by a resource in a given time interval [t1, t2) using PQL Queries.",
+ "Activity Frequency":
+ "Fraction of completions of a given activity a by a given resource r during a given time slot [t1, t2), with respect to the total number of activity completions by resource r during [t1, t2).",
+ "Activity Frequency (using PQL)":
+ "Fraction of completions of a given activity a by a given resource r during a given time slot [t1, t2), with respect to the total number of activity completions by resource r during [t1, t2) using PQL queries.",
+ "Activity Completions":
+ "The number of activity instances completed by a given resource during a given time slot.",
+ "Activity Completions (using PQL)":
+ "The number of activity instances completed by a given resource during a given time slot using PQL queries.",
+ "Case-Completions":
+ "The number of cases completed during a given time slot in which a given resource was involved.",
+ "Case-Completions (using PQL)":
+ "The number of cases completed during a given time slot in which a given resource was involved using PQL queries.",
+ "Fraction-Case Completions":
+ "The fraction of cases completed during a given time slot in which a given resource was involved with respect to the total number of cases completed during the time slot.",
+ "Fraction-Case Completions (using PQL)":
+ "The fraction of cases completed during a given time slot in which a given resource was involved with respect to the total number of cases completed during the time slot using PQL queries.",
+ "Average workload":
+ "The average number of activities started by a given resource but not completed at a moment in time.",
+ "Average workload (using PQL)":
+ "The average number of activities started by a given resource but not completed at a moment in time using PQL queries.",
+ Multitasking:
+ "The fraction of active time during which a given resource is involved in more than one activity with respect to the resource's active time.",
+ "Average Activity Duration":
+ "The average duration of instances of a given activity completed during a given time slot by a given resource.",
+ "Average case duration":
+ "The average duration of cases completed during a given time slot in which a given resource was involved.",
+ "Interaction Two Resources":
+ "The number of cases completed during a given time slot in which two given resources were involved.",
+ "Interaction Two Resources (using PQL)":
+ "The number of cases completed during a given time slot in which two given resources were involved using PQL queries.",
+ "Social Position":
+ "The fraction of resources involved in the same cases with a given resource during a given time slot with respect to the total number of resources active during the time slot.",
+};
+
// -------------------- Main Component --------------------
const ResultsPage = () => {
@@ -250,6 +370,7 @@ const ResultsPage = () => {
// Declarative Constraints
const [minSupport, setMinSupport] = useState("");
const [minConfidence, setMinConfidence] = useState("");
+ const [zetaValue, setZetaValue] = useState("");
const [declJobId, setDeclJobId] = useState(null);
const [selectedDeclOption, setSelectedDeclOption] = useState("");
const [declLoading, setDeclLoading] = useState(false);
@@ -452,9 +573,14 @@ const ResultsPage = () => {
}
try {
- const url = `${COMPUTE_DECLARATIVE_CONSTRAINTS}?min_support=${parseFloat(
- minSupport
- )}&min_confidence=${parseFloat(minConfidence)}`;
+ const queryParams = new URLSearchParams({
+ min_support: parseFloat(minSupport),
+ min_confidence: parseFloat(minConfidence),
+ });
+ if (zetaValue) {
+ queryParams.append("fitness_score", parseFloat(zetaValue));
+ }
+ const url = `${COMPUTE_DECLARATIVE_CONSTRAINTS}?${queryParams.toString()}`;
const res = await fetch(url, { method: "GET" });
const data = await res.json();
setDeclJobId(data.job_id);
@@ -467,7 +593,8 @@ const ResultsPage = () => {
};
const handleDeclarativeOptionSelect = async (option) => {
- if (!declJobId) {
+ const isPQL = option.label.endsWith("(PQL)");
+ if (!isPQL && !declJobId) {
alert("Please compute constraints first.");
return;
}
@@ -480,8 +607,11 @@ const ResultsPage = () => {
try {
let attempts = 0;
let resultData = null;
+ const endpoint = option.endpoint;
+ const fetchURL = isPQL ? endpoint : `${endpoint}/${declJobId}`;
+
while (attempts < 20) {
- const res = await fetch(`${option.endpoint}/${declJobId}`);
+ const res = await fetch(fetchURL);
if (res.ok) {
resultData = await res.json();
break;
@@ -652,18 +782,32 @@ const ResultsPage = () => {
const renderGraphAndTable = () => (
<>
- {graphData.map((graph, idx) => (
-
- Graph {idx + 1}
-
-
- ))}
+ {graphData.map((graph, idx) => {
+ const useArrowGraph = [
+ "get_always_before_pql",
+ "get_always_after_pql",
+ "get_directly_follows_and_count",
+ ].includes(selectedOption);
+
+ return (
+
+ Graph {idx + 1}
+ {useArrowGraph ? (
+
+ ) : (
+
+ )}
+
+ );
+ })}
+
{tableData.map((table, idx) => (
Table {idx + 1}