Publications 2017

  • [DOI] G. Acampora, A. Vitiello, B. Di Stefano, W. M. P. van der Aalst, C. W. Günther, and H. M. W. Verbeek, “IEEE 1849TM: The XES Standard: The Second IEEE Standard Sponsored by IEEE Computational Intelligence Society,” IEEE Computational Intelligence Magazine, pp. 4-8, 2017.
    [Bibtex]
    @Article{Acampora17,
    Title = {{IEEE 1849TM: The XES Standard: The Second IEEE Standard Sponsored by IEEE Computational Intelligence Society}},
    Author = {Acampora, G. and Vitiello, A. and Di Stefano, B. and Aalst, W. M. P. van der and G\"{u}nther, C. W. and Verbeek, H. M. W.},
    Journal = {{IEEE Computational Intelligence Magazine}},
    Year = {2017},
    Month = {May},
    Pages = {4--8},
    Doi = {10.1109/MCI.2017.2670420},
    Owner = {hverbeek},
    Timestamp = {2017.04.18}
    }
  • [PDF] H. S. Garcia Caballero, M. A. Westenberg, H. M. W. Verbeek, and W. M. P. van der Aalst, “Visual analytics for soundness verification of process models,” in Proceedings of TAProViz 2017, 2017.
    [Bibtex]
    @InProceedings{GarciaCaballero17,
    Title = {Visual Analytics for Soundness Verification of Process Models},
    Author = {Garcia Caballero, H. S. and Westenberg, M. A. and Verbeek, H. M. W. and Aalst, W. M. P. van der},
    Booktitle = {Proceedings of {TAProViz} 2017},
    Year = {2017},
    Note = {Accepted for publication},
    Owner = {hverbeek},
    Timestamp = {2017.07.06},
    Url = {http://www.win.tue.nl/~hverbeek/wp-content/papercite-data/pdf/garciacaballero17.pdf}
    }
  • [PDF] W. L. J. Lee, H. M. W. Verbeek, J. Munoz-Gama, W. M. P. van der Aalst, and M. Sepúlveda, “Replay using recomposition: alignment-based conformance checking in the large,” in 2017 BPM Demo Track and BPM Dissertation Award, BPM-D and DA 2017, co-located with 15th International Conference on Business Process Management, BPM 2017, Barcelona, Spain, 2017.
    [Bibtex]
    @InProceedings{Lee17,
    Title = {Replay using Recomposition: Alignment-Based Conformance Checking in the Large},
    Author = {Lee, W. L. J. and Verbeek, H. M. W. and Munoz-Gama, J. and Aalst, W. M. P. van der and Sep\'{u}lveda, M.},
    Booktitle = {2017 BPM Demo Track and BPM Dissertation Award, BPM-D and DA 2017, co-located with 15th International Conference on Business Process Management, BPM 2017},
    Year = {2017},
    Address = {Barcelona, Spain},
    Editor = {Mendling, J. and Weske, M. and Clariso, R. and Pentland, B. and Aalst W. M. P. van der and Leopold, H. and Kumar, A.},
    Month = {September 2017},
    Note = {Conditionally accepted},
    Series = {CEUR Workshop Proceedings},
    Volume = {1920},
    Abstract = {In the area of process mining, efficient alignment-based conformance checking is a hot topic. Existing approaches for conformance checking are typically monolithic and compute exact fitness values. One limitation with monolithic approaches is that it may take a significant amount of computation time in large processes. Alternatively, decomposition approaches run much faster but do not always compute an exact fitness value. This paper presents the tool Replay using Recomposition which returns the exact fitness value and the resulting alignments using the decomposition approach in an iterative manner. Other than computing the exact fitness value, users can configure the balance between result accuracy and computation time to get a fitness interval within set constraints, e.g., "Give me the best fitness estimation you can find within 5 minutes".},
    Owner = {hverbeek},
    Timestamp = {2017.07.19},
    Url = {http://www.win.tue.nl/~hverbeek/wp-content/papercite-data/pdf/lee17.pdf}
    }
  • [PDF] [DOI] L. Raichelson, P. Soffer, and H. M. W. Verbeek, “Merging event logs: combining granularity levels for process flow analysis,” Information Systems, 2017.
    [Bibtex]
    @Article{Raichelson17,
    Title = {Merging event logs: Combining granularity levels for process flow analysis},
    Author = {Lihi Raichelson and Pnina Soffer and H. M. W. Verbeek},
    Journal = {Information Systems},
    Year = {2017},
    Note = {Accepted for publication},
    Abstract = {Process mining techniques enable the discovery and analysis of business processes and the identification of opportunities for improvement. Processes often comprise separately managed procedures documented in separate log files which are impossible to mine in an integrative manner as the complete end-to-end process flow is obscure. In this paper we present a merging algorithm that results in a comprehensive merged log that offers two views of the end-to-end process: the case view, tracking the order, and the instance view tracking the item. This enables the identification of process flow problems that could not be detected by previous techniques.
    In addition, because our log-merging approach establishes the end-to-end process flow at two different abstraction levels, it is capable of handling both simple (n-to-one) and complex (n-to-many) relationships between log events. The unified log can be used by process mining techniques to identify flow problems, particularly at the point of integration between the processes under consideration. The procedure proposed in this paper has been implemented and evaluated using both synthetic logs and real-life logs.},
    Doi = {10.1016/j.is.2017.08.010},
    Owner = {hverbeek},
    Timestamp = {2017.08.30}
    }
  • [PDF] [DOI] H. M. W. Verbeek, “Decomposed replay using hiding and reduction as abstraction,” LNCS Transactions on Petri Nets and Other Models of Concurrency (ToPNoC), vol. XII, pp. 166-186, 2017.
    [Bibtex]
    @Article{Verbeek17,
    Title = {Decomposed Replay Using Hiding and Reduction as Abstraction},
    Author = {Verbeek, H. M. W.},
    Journal = {{LNCS Transactions on Petri Nets and Other Models of Concurrency (ToPNoC)}},
    Year = {2017},
    Pages = {166--186},
    Volume = {XII},
    Abstract = {In the area of process mining, decomposed replay has been proposed to be able to deal with nets and logs containing many different activities. The main assumption behind this decomposition is that replaying many subnets and sublogs containing only some activities is faster then replaying a single net and log containing many activities. Although for many nets and logs this assumption does hold, there are also nets and logs for which it does not hold. This paper shows an example net and log for which the decomposed replay may take way more time, and provides an explanation why this is the case. Next, to mitigate this problem, this paper proposes an alternative way to abstract the subnets from the single net, and shows that the decomposed replay using this alternative abstraction is faster than the monolithic replay even for the problematic cases as identified earlier. However, the alternative abstraction often results in longer computation times for the decomposed replay than the original abstraction. An advantage of the alternative abstraction over the original abstraction is that its cost estimates are typically better.},
    Doi = {10.1007/978-3-662-55862-1_8},
    Owner = {hverbeek},
    Timestamp = {2017.03.07},
    Url = {http://www.springerlink.com/content/f15t41545m061682/fulltext.pdf}
    }
  • [DOI] H. M. W. Verbeek, J. Munoz-Gama, and W. M. P. van der Aalst, “Divide and conquer: a tool framework for supporting decomposed discovery in process mining,” The Computer Journal, 2017.
    [Bibtex]
    @Article{Verbeek17a,
    Title = {Divide And Conquer: A Tool Framework for Supporting Decomposed Discovery in Process Mining},
    Author = {Verbeek, H. M. W. and Munoz-Gama, J. and Aalst, W. M. P. van der},
    Journal = {{The Computer Journal}},
    Year = {2017},
    Note = {Accepted for publication},
    Abstract = {In the area of process mining, decomposed replay has been proposed to be able to deal with nets and logs containing many different activities. The main assumption behind this decomposition is that replaying many subnets and sublogs containing only some activities is faster then replaying a single net and log containing many activities. Although for many nets and logs this assumption does hold, there are also nets and logs for which it does not hold. This paper shows an example net and log for which the decomposed replay may take way more time, and provides an explanation why this is the case. Next, to mitigate this problem, this paper proposes an alternative way to abstract the subnets from the single net, and shows that the decomposed replay using this alternative abstraction is faster than the monolithic replay even for the problematic cases as identified earlier. However, the alternative abstraction often results in longer computation times for the decomposed replay than the original abstraction. An advantage of the alternative abstraction over the original abstraction is that its cost estimates are typically better.},
    Doi = {10.1093/comjnl/bxx040},
    Owner = {hverbeek},
    Timestamp = {2017.04.06}
    }

3 Comments

Leave a Reply