Journal Articles
2021 |
Wally, Bernhard; Vyskocil, Jiri; Novak, Petr; Huemer, Christian; Sindelar, Radek; Kadera, Petr; Mazak-Huemer, Alexandra; Wimmer, Manuel Leveraging Iterative Plan Refinement for Reactive Smart Manufacturing Systems Journal Article IEEE Transactions on Automation Science and Engineering, 18 (1), pp. 230-243, 2021. @article{wally2021, title = {Leveraging Iterative Plan Refinement for Reactive Smart Manufacturing Systems}, author = {Bernhard Wally and Jiri Vyskocil and Petr Novak and Christian Huemer and Radek Sindelar and Petr Kadera and Alexandra Mazak-Huemer and Manuel Wimmer }, url = {https://ieeexplore.ieee.org/document/9190077}, doi = {10.1109/TASE.2020.3018402}, year = {2021}, date = {2021-01-21}, journal = {IEEE Transactions on Automation Science and Engineering}, volume = {18}, number = {1}, pages = {230-243}, abstract = {Industry 4.0 production systems must support flexibility in various dimensions, such as for the products to be produced, for the production processes to be applied, and for the available machinery. In this article, we present a novel approach to design and control smart manufacturing systems. The approach is reactive, that is responds to unplanned situations and implements an iterative refinement technique, that is, optimizes itself during runtime to better accommodate production goals. For realizing these advances, we present a model-driven methodology and we provide a prototypical implementation of such a production system. In particular, we employ Planning Domain Definition Language (PDDL) as our artificial intelligence environment for automated planning of production processes and combine it with one of the most prominent Industry 4.0 standards for the fundamental production system model: IEC 62264. We show how to plan the assembly of small trucks from available components and how to assign specific production operations to available production resources, including robotic manipulators and transportation system shuttles. 
Results of the evaluation indicate that the presented approach is feasible and that it is able to significantly strengthen the flexibility of production systems during runtime.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Industry 4.0 production systems must support flexibility in various dimensions, such as for the products to be produced, for the production processes to be applied, and for the available machinery. In this article, we present a novel approach to design and control smart manufacturing systems. The approach is reactive, that is responds to unplanned situations and implements an iterative refinement technique, that is, optimizes itself during runtime to better accommodate production goals. For realizing these advances, we present a model-driven methodology and we provide a prototypical implementation of such a production system. In particular, we employ Planning Domain Definition Language (PDDL) as our artificial intelligence environment for automated planning of production processes and combine it with one of the most prominent Industry 4.0 standards for the fundamental production system model: IEC 62264. We show how to plan the assembly of small trucks from available components and how to assign specific production operations to available production resources, including robotic manipulators and transportation system shuttles. Results of the evaluation indicate that the presented approach is feasible and that it is able to significantly strengthen the flexibility of production systems during runtime. |
2020 |
Mazak-Huemer, Alexandra; Wolny, Sabine; Gómez, Abel; Cabot, Jordi; Wimmer, Manuel; Kappel, Gerti Temporal Models on Time Series Databases Journal Article J. Object Technol., 19 (3), pp. 1-15, 2020. @article{MazakWGCWK20, title = {Temporal Models on Time Series Databases}, author = {Alexandra Mazak-Huemer and Sabine Wolny and Abel Gómez and Jordi Cabot and Manuel Wimmer and Gerti Kappel}, url = {http://www.jot.fm/contents/issue_2020_03/article14.html}, doi = {10.5381/jot.2020.19.3.a14}, year = {2020}, date = {2020-11-03}, journal = {J. Object Technol.}, volume = {19}, number = {3}, pages = {1-15}, abstract = {With the emergence of Cyber-Physical Systems (CPS), several sophisticated runtime monitoring solutions have been proposed in order to deal with extensive execution logs. One promising development in this respect is the integration of time series databases that support the storage of massive amounts of historical data as well as to provide fast query capabilities to reason about runtime properties of such CPS. In this paper, we discuss how conceptual modeling can benefit from time series databases, and vice versa. In particular, we present how metamodels and their instances, i.e., models, can be partially mapped to time series databases. Thus, the traceability between design and simulation/runtime activities can be ensured by retrieving and accessing runtime information, i.e., time series data, in design models. On this basis, the contribution of this paper is four-fold. First, a dedicated profile for annotating design models for time series databases is presented. Second, a mapping for integrating the metamodeling framework EMF with InfluxDB is introduced as a technology backbone enabling two distinct mapping strategies for model information. Third, we demonstrate how continuous time series queries can be combined with the Object Constraint Language (OCL) for navigation through models, now enriched with derived runtime properties. 
Finally, we also present an initial evaluation of the different mapping strategies with respect to data storage and query performance. Our initial results show the efficiency of applying derived runtime properties as time series queries also for large model histories.}, keywords = {}, pubstate = {published}, tppubtype = {article} } With the emergence of Cyber-Physical Systems (CPS), several sophisticated runtime monitoring solutions have been proposed in order to deal with extensive execution logs. One promising development in this respect is the integration of time series databases that support the storage of massive amounts of historical data as well as to provide fast query capabilities to reason about runtime properties of such CPS. In this paper, we discuss how conceptual modeling can benefit from time series databases, and vice versa. In particular, we present how metamodels and their instances, i.e., models, can be partially mapped to time series databases. Thus, the traceability between design and simulation/runtime activities can be ensured by retrieving and accessing runtime information, i.e., time series data, in design models. On this basis, the contribution of this paper is four-fold. First, a dedicated profile for annotating design models for time series databases is presented. Second, a mapping for integrating the metamodeling framework EMF with InfluxDB is introduced as a technology backbone enabling two distinct mapping strategies for model information. Third, we demonstrate how continuous time series queries can be combined with the Object Constraint Language (OCL) for navigation through models, now enriched with derived runtime properties. Finally, we also present an initial evaluation of the different mapping strategies with respect to data storage and query performance. Our initial results show the efficiency of applying derived runtime properties as time series queries also for large model histories. |
Alkhazi, Bader; Abid, Chaima; Kessentini, Marouane; Leroy, Dorian; Wimmer, Manuel Multi-criteria test cases selection for model transformations Journal Article Automated Software Engineering, 27 (1), pp. 91-118, 2020. @article{AlkhaziAKLW20, title = {Multi-criteria test cases selection for model transformations}, author = {Bader Alkhazi and Chaima Abid and Marouane Kessentini and Dorian Leroy and Manuel Wimmer }, url = {https://doi.org/10.1007/s10515-020-00271-w}, doi = {10.1007/s10515-020-00271-w}, year = {2020}, date = {2020-10-26}, journal = {Automated Software Engineering}, volume = {27}, number = {1}, pages = {91-118}, abstract = {Model transformations play an important role in the evolution of systems in various fields such as healthcare, automotive and aerospace industry. Thus, it is important to check the correctness of model transformation programs. Several approaches have been proposed to generate test cases for model transformations based on different coverage criteria (e.g., statements, rules, metamodel elements, etc.). However, the execution of a large number of test cases during the evolution of transformation programs is time-consuming and may include a lot of overlap between the test cases. In this paper, we propose a test case selection approach for model transformations based on multi-objective search. We use the non-dominated sorting genetic algorithm (NSGA-II) to find the best trade-offs between two conflicting objectives: (1) maximize the coverage of rules and (2) minimize the execution time of the selected test cases. We validated our approach on several evolution cases of medium and large ATLAS Transformation Language programs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Model transformations play an important role in the evolution of systems in various fields such as healthcare, automotive and aerospace industry. Thus, it is important to check the correctness of model transformation programs. 
Several approaches have been proposed to generate test cases for model transformations based on different coverage criteria (e.g., statements, rules, metamodel elements, etc.). However, the execution of a large number of test cases during the evolution of transformation programs is time-consuming and may include a lot of overlap between the test cases. In this paper, we propose a test case selection approach for model transformations based on multi-objective search. We use the non-dominated sorting genetic algorithm (NSGA-II) to find the best trade-offs between two conflicting objectives: (1) maximize the coverage of rules and (2) minimize the execution time of the selected test cases. We validated our approach on several evolution cases of medium and large ATLAS Transformation Language programs. |
Alkhazi, Bader; Abid, Chaima; Kessentini, Marouane; Wimmer, Manuel On the value of quality attributes for refactoring ATL model transformations: A multi-objective approach Journal Article Information and Software Technology, 120 , pp. 106243, 2020. @article{AlkhaziAKW20, title = {On the value of quality attributes for refactoring ATL model transformations: A multi-objective approach}, author = {Bader Alkhazi and Chaima Abid and Marouane Kessentini and Manuel Wimmer}, url = {https://doi.org/10.1016/j.infsof.2019.106243}, doi = {10.1016/j.infsof.2019.106243}, year = {2020}, date = {2020-10-26}, journal = {Information and Software Technology}, volume = {120}, pages = {106243}, abstract = {Model transformations play a fundamental role in Model-Driven Engineering (MDE) as they are used to manipulate models and to transform them between source and target metamodels. However, model transformation programs lack significant support to maintain good quality which is in contrast to established programming paradigms such as object-oriented programming. In order to improve the quality of model transformations, the majority of existing studies suggest manual support for the developers to execute a number of refactoring types on model transformation programs. Other recent studies aimed to automate the refactoring of model transformation programs, mostly focusing on the ATLAS Transformation Language (ATL), by improving mainly few quality metrics using a number of refactoring types.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Model transformations play a fundamental role in Model-Driven Engineering (MDE) as they are used to manipulate models and to transform them between source and target metamodels. However, model transformation programs lack significant support to maintain good quality which is in contrast to established programming paradigms such as object-oriented programming. 
In order to improve the quality of model transformations, the majority of existing studies suggest manual support for the developers to execute a number of refactoring types on model transformation programs. Other recent studies aimed to automate the refactoring of model transformation programs, mostly focusing on the ATLAS Transformation Language (ATL), by improving mainly few quality metrics using a number of refactoring types. |
Wortmann, Andreas; Barais, Olivier; Combemale, Benoît; Wimmer, Manuel Modeling languages in Industry 4.0: an extended systematic mapping study Journal Article Software and Systems Modeling, 19 (1), pp. 67-94, 2020. @article{WortmannBCW20, title = {Modeling languages in Industry 4.0: an extended systematic mapping study}, author = {Andreas Wortmann and Olivier Barais and Benoît Combemale and Manuel Wimmer}, url = {https://doi.org/10.1007/s10270-019-00757-6}, doi = {10.1007/s10270-019-00757-6}, year = {2020}, date = {2020-09-18}, journal = {Software and Systems Modeling}, volume = {19}, number = {1}, pages = {67-94}, abstract = {Industry 4.0 integrates cyber-physical systems with the Internet of Things to optimize the complete value-added chain. Successfully applying Industry 4.0 requires the cooperation of various stakeholders from different domains. Domain-specific modeling languages promise to facilitate their involvement through leveraging (domain-specific) models to primary development artifacts. We aim to assess the use of modeling in Industry 4.0 through the lens of modeling languages in a broad sense. Based on an extensive literature review, we updated our systematic mapping study on modeling languages and modeling techniques used in Industry 4.0 (Wortmann et al., Conference on model-driven engineering languages and systems (MODELS’17), IEEE, pp 281–291, 2017) to include publications until February 2018. Overall, the updated study considers 3344 candidate publications that were systematically investigated until 408 relevant publications were identified. Based on these, we developed an updated map of the research landscape on modeling languages and techniques for Industry 4.0. Research on modeling languages in Industry 4.0 focuses on contributing methods to solve the challenges of digital representation and integration. To this end, languages from systems engineering and knowledge representation are applied most often but rarely combined. 
There also is a gap between the communities researching and applying modeling languages for Industry 4.0 that originates from different perspectives on modeling and related standards. From the vantage point of modeling, Industry 4.0 is the combination of systems engineering, with cyber-physical systems, and knowledge engineering. Research currently is splintered along topics and communities and accelerating progress demands for multi-disciplinary, integrated research efforts.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Industry 4.0 integrates cyber-physical systems with the Internet of Things to optimize the complete value-added chain. Successfully applying Industry 4.0 requires the cooperation of various stakeholders from different domains. Domain-specific modeling languages promise to facilitate their involvement through leveraging (domain-specific) models to primary development artifacts. We aim to assess the use of modeling in Industry 4.0 through the lens of modeling languages in a broad sense. Based on an extensive literature review, we updated our systematic mapping study on modeling languages and modeling techniques used in Industry 4.0 (Wortmann et al., Conference on model-driven engineering languages and systems (MODELS’17), IEEE, pp 281–291, 2017) to include publications until February 2018. Overall, the updated study considers 3344 candidate publications that were systematically investigated until 408 relevant publications were identified. Based on these, we developed an updated map of the research landscape on modeling languages and techniques for Industry 4.0. Research on modeling languages in Industry 4.0 focuses on contributing methods to solve the challenges of digital representation and integration. To this end, languages from systems engineering and knowledge representation are applied most often but rarely combined. 
There also is a gap between the communities researching and applying modeling languages for Industry 4.0 that originates from different perspectives on modeling and related standards. From the vantage point of modeling, Industry 4.0 is the combination of systems engineering, with cyber-physical systems, and knowledge engineering. Research currently is splintered along topics and communities and accelerating progress demands for multi-disciplinary, integrated research efforts. |
Leroy, Dorian; Bousse, Erwan; Wimmer, Manuel; Mayerhofer, Tanja; Combemale, Benoît; Schwinger, Wieland Behavioral interfaces for executable DSLs Journal Article Software and Systems Modeling, 19 (4), pp. 1015-1043, 2020. @article{LeroyBWMCS20, title = {Behavioral interfaces for executable DSLs}, author = {Dorian Leroy and Erwan Bousse and Manuel Wimmer and Tanja Mayerhofer and Benoît Combemale and Wieland Schwinger}, url = {https://link.springer.com/content/pdf/10.1007/s10270-020-00798-2.pdf}, doi = {10.1007/s10270-020-00798-2}, year = {2020}, date = {2020-09-18}, journal = {Software and Systems Modeling}, volume = {19}, number = {4}, pages = {1015-1043}, abstract = {Executable domain-specific languages (DSLs) enable the execution of behavioral models. While an execution is mostly driven by the model content (e.g., control structures), many use cases require interacting with the running model, such as simulating scenarios in an automated or interactive way, or coupling the model with other models of the system or environment. The management of these interactions is usually hardcoded into the semantics of the DSL, which prevents its reuse for other DSLs and the provision of generic interaction-centric tools (e.g., event injector). In this paper, we propose a metalanguage for complementing the definition of executable DSLs with explicit behavioral interfaces to enable external tools to interact with executed models in a unified way. We implemented the proposed metalanguage in the GEMOC Studio and show how behavioral interfaces enable the realization of tools that are generic and thus usable for different executable DSLs.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Executable domain-specific languages (DSLs) enable the execution of behavioral models. 
While an execution is mostly driven by the model content (e.g., control structures), many use cases require interacting with the running model, such as simulating scenarios in an automated or interactive way, or coupling the model with other models of the system or environment. The management of these interactions is usually hardcoded into the semantics of the DSL, which prevents its reuse for other DSLs and the provision of generic interaction-centric tools (e.g., event injector). In this paper, we propose a metalanguage for complementing the definition of executable DSLs with explicit behavioral interfaces to enable external tools to interact with executed models in a unified way. We implemented the proposed metalanguage in the GEMOC Studio and show how behavioral interfaces enable the realization of tools that are generic and thus usable for different executable DSLs. |
Wally, Bernhard; Vyskocil, Jiri; Novak, Petr; Huemer, Christian; Sindelar, Radek; Kadera, Petr; Mazak-Huemer, Alexandra; Wimmer, Manuel Leveraging Iterative Plan Refinement for Reactive Smart Manufacturing Systems Journal Article IEEE Transactions on Automation Science and Engineering, pp. 1-14, 2020. @article{wimmer2020b, title = {Leveraging Iterative Plan Refinement for Reactive Smart Manufacturing Systems}, author = {Bernhard Wally and Jiri Vyskocil and Petr Novak and Christian Huemer and Radek Sindelar and Petr Kadera and Alexandra Mazak-Huemer and Manuel Wimmer}, url = {https://doi.org/10.1109/TASE.2020.3018402 https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9190077}, doi = {10.1109/TASE.2020.3018402}, year = {2020}, date = {2020-09-09}, journal = {IEEE Transactions on Automation Science and Engineering}, pages = {1-14}, abstract = {Industry 4.0 production systems must support flexibility in various dimensions, such as for the products to be produced, for the production processes to be applied, and for the available machinery. In this article, we present a novel approach to design and control smart manufacturing systems. The approach is reactive, that is responds to unplanned situations and implements an iterative refinement technique, that is, optimizes itself during runtime to better accommodate production goals. For realizing these advances, we present a model-driven methodology and we provide a prototypical implementation of such a production system. In particular, we employ Planning Domain Definition Language (PDDL) as our artificial intelligence environment for automated planning of production processes and combine it with one of the most prominent Industry 4.0 standards for the fundamental production system model: IEC 62264. 
We show how to plan the assembly of small trucks from available components and how to assign specific production operations to available production resources, including robotic manipulators and transportation system shuttles. Results of the evaluation indicate that the presented approach is feasible and that it is able to significantly strengthen the flexibility of production systems during runtime.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Industry 4.0 production systems must support flexibility in various dimensions, such as for the products to be produced, for the production processes to be applied, and for the available machinery. In this article, we present a novel approach to design and control smart manufacturing systems. The approach is reactive, that is responds to unplanned situations and implements an iterative refinement technique, that is, optimizes itself during runtime to better accommodate production goals. For realizing these advances, we present a model-driven methodology and we provide a prototypical implementation of such a production system. In particular, we employ Planning Domain Definition Language (PDDL) as our artificial intelligence environment for automated planning of production processes and combine it with one of the most prominent Industry 4.0 standards for the fundamental production system model: IEC 62264. We show how to plan the assembly of small trucks from available components and how to assign specific production operations to available production resources, including robotic manipulators and transportation system shuttles. Results of the evaluation indicate that the presented approach is feasible and that it is able to significantly strengthen the flexibility of production systems during runtime. |
Martinez, Salvador; Wimmer, Manuel; Cabot, Jordi Efficient plagiarism detection for software modeling assignments Journal Article Computer Science Education, 30 (2), pp. 187-215, 2020. @article{MartinezWC20, title = {Efficient plagiarism detection for software modeling assignments}, author = {Salvador Martinez and Manuel Wimmer and Jordi Cabot}, url = {https://doi.org/10.1080/08993408.2020.1711495}, doi = {10.1080/08993408.2020.1711495}, year = {2020}, date = {2020-08-06}, journal = {Computer Science Education}, volume = {30}, number = {2}, pages = {187-215}, abstract = {Reports suggest plagiarism is a common occurrence in universities. While plagiarism detection mechanisms exist for textual artifacts, this is less so for non-code related ones such as software design artifacts like models, metamodels or model transformations. Objective: To provide an efficient mechanism for the detection of plagiarism in repositories of Model-Driven Engineering (MDE) assignments. Method: Our approach is based on the adaptation of the Locality Sensitive Hashing, an approximate nearest neighbor search mechanism, to the modeling technical space. We evaluate our approach on a real use case consisting of two repositories containing 10 years of student answers to MDE course assignments. Findings: We have found that: (i) effectively, plagiarism occurred on the aforementioned course assignments (ii) our tool was able to efficiently detect them. Implications: Plagiarism detection must be integrated into the toolset and activities of MDE instructors in order to correctly evaluate students.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Reports suggest plagiarism is a common occurrence in universities. While plagiarism detection mechanisms exist for textual artifacts, this is less so for non-code related ones such as software design artifacts like models, metamodels or model transformations. 
Objective: To provide an efficient mechanism for the detection of plagiarism in repositories of Model-Driven Engineering (MDE) assignments. Method: Our approach is based on the adaptation of the Locality Sensitive Hashing, an approximate nearest neighbor search mechanism, to the modeling technical space. We evaluate our approach on a real use case consisting of two repositories containing 10 years of student answers to MDE course assignments. Findings: We have found that: (i) effectively, plagiarism occurred on the aforementioned course assignments (ii) our tool was able to efficiently detect them. Implications: Plagiarism detection must be integrated into the toolset and activities of MDE instructors in order to correctly evaluate students. |
Leroy, Dorian; Jeanjean, Pierre; Bousse, Erwan; Wimmer, Manuel; Combemale, Benoit Runtime Monitoring for Executable DSLs Journal Article J. Object Technol., 19 (2), pp. 6:1-23, 2020. @article{LeroyJBWC20, title = {Runtime Monitoring for Executable DSLs}, author = {Dorian Leroy and Pierre Jeanjean and Erwan Bousse and Manuel Wimmer and Benoit Combemale}, url = {https://doi.org/10.5381/jot.2020.19.2.a6}, doi = {10.5381/jot.2020.19.2.a6}, year = {2020}, date = {2020-08-03}, journal = {J. Object Technol.}, volume = {19}, number = {2}, pages = {6:1-23}, abstract = {Runtime monitoring is a fundamental technique used throughout the lifecycle of a system for many purposes, such as debugging, testing, or live analytics. While runtime monitoring for general purpose programming languages has seen a great amount of research, developing such complex facilities for any executable Domain Specific Language (DSL) remains a challenging, reoccurring and error prone task. A generic solution must both support a wide range of executable DSLs (xDSLs) and induce as little execution time overhead as possible. Our contribution is a fully generic approach based on a temporal property language with a semantics tailored for runtime verification. Properties can be compiled to efficient runtime monitors that can be attached to any kind of executable discrete event model within an integrated development environment. Efficiency is bolstered using a novel combination of structural model queries and complex event processing. Our evaluation on 3 xDSLs shows that the approach is applicable with an execution time overhead of 121% (on executions shorter than 1s), to 79% (on executions shorter than 20s) making it suitable for model testing and debugging.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Runtime monitoring is a fundamental technique used throughout the lifecycle of a system for many purposes, such as debugging, testing, or live analytics. 
While runtime monitoring for general purpose programming languages has seen a great amount of research, developing such complex facilities for any executable Domain Specific Language (DSL) remains a challenging, reoccurring and error prone task. A generic solution must both support a wide range of executable DSLs (xDSLs) and induce as little execution time overhead as possible. Our contribution is a fully generic approach based on a temporal property language with a semantics tailored for runtime verification. Properties can be compiled to efficient runtime monitors that can be attached to any kind of executable discrete event model within an integrated development environment. Efficiency is bolstered using a novel combination of structural model queries and complex event processing. Our evaluation on 3 xDSLs shows that the approach is applicable with an execution time overhead of 121% (on executions shorter than 1s), to 79% (on executions shorter than 20s) making it suitable for model testing and debugging. |
Cuadrado, Jesus Sanchez; Burgueño, Loli; Wimmer, Manuel; Vallecillo, Antonio Efficient execution of ATL model transformations using static analysis and parallelism Journal Article IEEE Transactions on Software Engineering, pp. 1, 2020. @article{wimmer2020c, title = {Efficient execution of ATL model transformations using static analysis and parallelism}, author = {Jesus Sanchez Cuadrado and Loli Burgueño and Manuel Wimmer and Antonio Vallecillo}, url = {https://doi.org/10.1109/TSE.2020.3011388 https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9146715}, doi = {10.1109/TSE.2020.3011388}, year = {2020}, date = {2020-07-23}, journal = {IEEE Transactions on Software Engineering}, pages = {1}, abstract = {Although model transformations are considered to be the heart and soul of Model Driven Engineering (MDE), there are still several challenges that need to be addressed to unleash their full potential in industrial settings. Among other shortcomings, their performance and scalability remain unsatisfactory for dealing with large models, making their wide adoption difficult in practice. This paper presents A2L, a compiler for the parallel execution of ATL model transformations, which produces efficient code that can use existing multicore computer architectures, and applies effective optimizations at the transformation level using static analysis. We have evaluated its performance in both sequential and multi-threaded modes obtaining significant speedups with respect to current ATL implementations. 
In particular, we obtain speedups between 2.32x and 38.28x for the A2L sequential version, and between 2.40x and 245.83x when A2L is executed in parallel, with expected average speedups of 8.59x and 22.42x, respectively.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Although model transformations are considered to be the heart and soul of Model Driven Engineering (MDE), there are still several challenges that need to be addressed to unleash their full potential in industrial settings. Among other shortcomings, their performance and scalability remain unsatisfactory for dealing with large models, making their wide adoption difficult in practice. This paper presents A2L, a compiler for the parallel execution of ATL model transformations, which produces efficient code that can use existing multicore computer architectures, and applies effective optimizations at the transformation level using static analysis. We have evaluated its performance in both sequential and multi-threaded modes obtaining significant speedups with respect to current ATL implementations. In particular, we obtain speedups between 2.32x and 38.28x for the A2L sequential version, and between 2.40x and 245.83x when A2L is executed in parallel, with expected average speedups of 8.59x and 22.42x, respectively. |
Combemale, Benoit; Kienzle, Jörg; Mussbacher, Gunter; Ali, Hyacinth; Amyot, Daniel; Bagherzadeh, Mojtaba; Batot, Edouard; Bencomo, Nelly; Benni, Benjamin; Bruel, Jean-Michel; Cabot, Jordi; Cheng, Betty; Collet, Philippe; Engels, Gregor; Heinrich, Robert; Jézéquel, Jean-Marc; Koziolek, Anne; Mosser, Sébastien; Reussner, Ralf; Sahraoui, Houari; Saini, Rijul; Sallou, June; Stinckwich, Serge; Syriani, Eugene; Wimmer, Manuel A Hitchhiker's Guide to Model-Driven Engineering for Data-Centric Systems Journal Article IEEE Software, pp. 1-9, 2020. @article{wimmer2020d, title = {A Hitchhiker's Guide to Model-Driven Engineering for Data-Centric Systems}, author = {Benoit Combemale and Jörg Kienzle and Gunter Mussbacher and Hyacinth Ali and Daniel Amyot and Mojtaba Bagherzadeh and Edouard Batot and Nelly Bencomo and Benjamin Benni and Jean-Michel Bruel and Jordi Cabot and Betty Cheng and Philippe Collet and Gregor Engels and Robert Heinrich and Jean-Marc Jézéquel and Anne Koziolek and Sébastien Mosser and Ralf Reussner and Houari Sahraoui and Rijul Saini and June Sallou and Serge Stinckwich and Eugene Syriani and Manuel Wimmer}, url = {https://hal.inria.fr/hal-02612087/file/ieeesw-moda-preprint.pdf}, doi = {10.1109/MS.2020.2995125}, year = {2020}, date = {2020-05-18}, journal = {IEEE Software}, pages = {1-9}, abstract = {A broad spectrum of application domains are increasingly making use of heterogeneous and large volumes of data with varying degrees of humans in the loop. The recent success of Artificial Intelligence (AI) and, in particular, Machine Learning (ML) further amplifies the relevance of data in the development, maintenance, evolution, and execution management of systems built with model-driven engineering techniques. 
Applications include critical infrastructure areas such as intelligent transportation, smart energy management, public healthcare, and emergency and disaster management; many of these systems are considered socio-technical systems given the human, social, and organizational factors that must be considered during the system life-cycle [1]. This article introduces a conceptual reference framework – the Models and Data (MODA) framework – to support a data-centric and model-driven approach for the integration of heterogeneous models and their respective data for the entire life-cycle of socio-technical systems.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A broad spectrum of application domains are increasingly making use of heterogeneous and large volumes of data with varying degrees of humans in the loop. The recent success of Artificial Intelligence (AI) and, in particular, Machine Learning (ML) further amplifies the relevance of data in the development, maintenance, evolution, and execution management of systems built with model-driven engineering techniques. Applications include critical infrastructure areas such as intelligent transportation, smart energy management, public healthcare, and emergency and disaster management; many of these systems are considered socio-technical systems given the human, social, and organizational factors that must be considered during the system life-cycle [1]. This article introduces a conceptual reference framework – the Models and Data (MODA) framework – to support a data-centric and model-driven approach for the integration of heterogeneous models and their respective data for the entire life-cycle of socio-technical systems. |
2019 |
Syriani, Eugene; Bill, Robert; Wimmer, Manuel Domain-Specific Model Distance Measures Journal Article Journal of Object Technology, 18 (3), pp. 1-19, 2019. @article{Syriani2019BW19, title = {Domain-Specific Model Distance Measures}, author = {Eugene Syriani and Robert Bill and Manuel Wimmer}, url = {https://doi.org/10.5381/jot.2019.18.3.a3 }, doi = {10.5381/jot.2019.18.3.a3}, year = {2019}, date = {2019-12-31}, booktitle = {In Proceedings of the 12th International Conference on Model Transformations (ICMT 2019)}, journal = {Journal of Object Technology}, volume = {18}, number = {3}, pages = {1-19}, abstract = {A lot of research was invested in the last decade to develop differencing methods to identify the changes performed between two model versions. Typically, these changes are captured in an explicit difference model. However, less attention was paid to quantifying the distance between model versions. While different versions of a model may have the same amount of differences, their distance to the base model may be drastically different. Therefore, we present distance metrics for models. We provide a method to automatically generate tool support for computing domain-specific distance measures. We show the benefits of distance measures over model differences in the use case of searching for the explanation of model evolution in terms of domain-specific change operations. The results of our experiments show that using distance metrics outperforms common difference models techniques.}, keywords = {}, pubstate = {published}, tppubtype = {article} } A lot of research was invested in the last decade to develop differencing methods to identify the changes performed between two model versions. Typically, these changes are captured in an explicit difference model. However, less attention was paid to quantifying the distance between model versions. 
While different versions of a model may have the same amount of differences, their distance to the base model may be drastically different. Therefore, we present distance metrics for models. We provide a method to automatically generate tool support for computing domain-specific distance measures. We show the benefits of distance measures over model differences in the use case of searching for the explanation of model evolution in terms of domain-specific change operations. The results of our experiments show that using distance metrics outperforms common difference models techniques. |
Feldmann, Stefan; Kernschmidt, Konstantin; Wimmer, Manuel; Vogel-Heuser, Birgit Managing Inter-Model Inconsistencies in Model-Based Systems Engineering: Application in Automated Production Systems Engineering Journal Article Journal of Systems and Software Engineering, 153 , pp. 105-134, 2019. @article{Feldmann2019imi, title = {Managing Inter-Model Inconsistencies in Model-Based Systems Engineering: Application in Automated Production Systems Engineering}, author = {Stefan Feldmann and Konstantin Kernschmidt and Manuel Wimmer and Birgit Vogel-Heuser}, doi = {10.1016/j.jss.2019.03.060}, year = {2019}, date = {2019-12-02}, journal = {Journal of Systems and Software Engineering}, volume = {153}, pages = {105-134}, abstract = {To cope with the challenge of managing the complexity of automated production systems, model-based approaches are applied increasingly. However, due to the multitude of different disciplines involved in automated production systems engineering, e.g., mechanical, electrical, and software engineering, several modeling languages are used within a project to describe the system from different perspectives. To ensure that the resulting system models are not contradictory, the necessity to continuously diagnose and handle inconsistencies within and in between models arises. This article proposes a comprehensive approach that allows stakeholders to specify, diagnose, and handle inconsistencies in model-based systems engineering. In particular, to explicitly capture the dependencies and consistency rules that must hold between the disparate engineering models, a dedicated graphical modeling language is proposed. By means of this language, stakeholders can specify, diagnose, and handle inconsistencies in the accompanying inconsistency management framework. The approach is implemented based on the Eclipse Modeling Framework (EMF) and evaluated based on a demonstrator project as well as a small user experiment. 
First findings indicate that the approach is expressive enough to capture typical dependencies and consistency rules in the automated production system domain and that it requires less effort compared to manually developing inter-model inconsistency management solutions.}, keywords = {}, pubstate = {published}, tppubtype = {article} } To cope with the challenge of managing the complexity of automated production systems, model-based approaches are applied increasingly. However, due to the multitude of different disciplines involved in automated production systems engineering, e.g., mechanical, electrical, and software engineering, several modeling languages are used within a project to describe the system from different perspectives. To ensure that the resulting system models are not contradictory, the necessity to continuously diagnose and handle inconsistencies within and in between models arises. This article proposes a comprehensive approach that allows stakeholders to specify, diagnose, and handle inconsistencies in model-based systems engineering. In particular, to explicitly capture the dependencies and consistency rules that must hold between the disparate engineering models, a dedicated graphical modeling language is proposed. By means of this language, stakeholders can specify, diagnose, and handle inconsistencies in the accompanying inconsistency management framework. The approach is implemented based on the Eclipse Modeling Framework (EMF) and evaluated based on a demonstrator project as well as a small user experiment. First findings indicate that the approach is expressive enough to capture typical dependencies and consistency rules in the automated production system domain and that it requires less effort compared to manually developing inter-model inconsistency management solutions. |
Burgueno, Loli; Ciccozzi, Federico; Famelis, Michalis; Kappel, Gerti; Lambers, Leen; Mosser, Sébastien; Paige, Richard F; Pierantonio, Alfonso; Rensink, Arend; Salay, Rick; Taentzer, Gabriele; Vallecillo, Antonio; Wimmer, Manuel Contents for a Model-Based Software Engineering Body of Knowledge Journal Article Software and Systems Modeling, 18 (6), pp. 3193-3205, 2019. @article{BurguenoCFKLMPP19, title = {Contents for a Model-Based Software Engineering Body of Knowledge}, author = {Loli Burgueno and Federico Ciccozzi and Michalis Famelis and Gerti Kappel and Leen Lambers and Sébastien Mosser and Richard F. Paige and Alfonso Pierantonio and Arend Rensink and Rick Salay and Gabriele Taentzer and Antonio Vallecillo and Manuel Wimmer}, doi = {10.1007/s10270-019-00746-9}, year = {2019}, date = {2019-10-31}, journal = {Software and Systems Modeling}, volume = {18}, number = {6}, pages = {3193-3205}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Bousse, Erwan; Wimmer, Manuel Domain-Level Observation and Control for Compiled Executable DSLs Journal Article 22nd ACM/IEEE International Conference on Model Driven Engineering, Languages and Systems, (MODELS 2019), Munich, Germany, September 15-20, 2019, pp. 150-160, 2019. @article{wimmer2019e, title = {Domain-Level Observation and Control for Compiled Executable DSLs}, author = {Erwan Bousse and Manuel Wimmer}, url = {https://www.se.jku.at/domain-level-observation-and-control-for-compiled-executable-dsls/}, doi = {10.1109/MODELS.2019.000-6}, year = {2019}, date = {2019-09-20}, journal = {22nd ACM/IEEE International Conference on Model Driven Engineering, Languages and Systems, (MODELS 2019), Munich, Germany, September 15-20, 2019}, pages = {150-160}, abstract = {Executable Domain-Specific Languages (DSLs) are commonly defined with either operational semantics (i.e., interpretation) or translational semantics (i.e., compilation). An interpreted DSL relies on domain concepts to specify the possible execution states and steps, which enables the observation and control of executions using the very same domain concepts. In contrast, a compiled DSL relies on a transformation to an arbitrarily different target language. This creates a conceptual gap, where the execution can only be observed and controlled through target domain concepts, to the detriment of experts or tools that only understand the source domain. To address this problem, we propose a language engineering architecture for compiled DSLs that enables the observation and control of executions using source domain concepts. The architecture requires the definition of the source domain execution steps and states, along with a feedback manager that translates steps and states of the target domain back to the source domain. 
We evaluate the architecture with two different compiled DSLs, and show that it does enable domain-level observation and control while increasing execution time by 2× in the worst observed case.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Executable Domain-Specific Languages (DSLs) are commonly defined with either operational semantics (i.e., interpretation) or translational semantics (i.e., compilation). An interpreted DSL relies on domain concepts to specify the possible execution states and steps, which enables the observation and control of executions using the very same domain concepts. In contrast, a compiled DSL relies on a transformation to an arbitrarily different target language. This creates a conceptual gap, where the execution can only be observed and controlled through target domain concepts, to the detriment of experts or tools that only understand the source domain. To address this problem, we propose a language engineering architecture for compiled DSLs that enables the observation and control of executions using source domain concepts. The architecture requires the definition of the source domain execution steps and states, along with a feedback manager that translates steps and states of the target domain back to the source domain. We evaluate the architecture with two different compiled DSLs, and show that it does enable domain-level observation and control while increasing execution time by 2× in the worst observed case. |
Wally, Bernhard; Vyskocil, Jiri; Novak, Petr; Huemer, Christian; Sindelar, Radek; Kadera, Petr; Mazak, Alexandra; Wimmer, Manuel Flexible Production Systems: Automated Generation of Operations Plans Based on ISA-95 and PDDL Journal Article IEEE Robotics and Automation Letters, 4 (4), pp. 4062-4069, 2019, ISSN: 2377-3766. @article{Wally2019fps, title = {Flexible Production Systems: Automated Generation of Operations Plans Based on ISA-95 and PDDL}, author = {Bernhard Wally and Jiri Vyskocil and Petr Novak and Christian Huemer and Radek Sindelar and Petr Kadera and Alexandra Mazak and Manuel Wimmer}, doi = {10.1109/LRA.2019.2929991}, issn = {2377-3766}, year = {2019}, date = {2019-07-22}, journal = {IEEE Robotics and Automation Letters}, volume = {4}, number = {4}, pages = {4062-4069}, abstract = {Model-driven engineering (MDE) provides tools and methods for the manipulation of formal models. In this letter, we leverage MDE for the transformation of production system models into flat files that are understood by general purpose planning tools and that enable the computation of "plans", i.e., sequences of production steps that are required to reach certain production goals. These plans are then merged back into the production system model, thus enriching the formalized production system knowledge.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Model-driven engineering (MDE) provides tools and methods for the manipulation of formal models. In this letter, we leverage MDE for the transformation of production system models into flat files that are understood by general purpose planning tools and that enable the computation of "plans", i.e., sequences of production steps that are required to reach certain production goals. These plans are then merged back into the production system model, thus enriching the formalized production system knowledge. |
Vogel-Heuser, Birgit; Fantuzzi, Cesare; Wimmer, Manuel; Böhm, Markus; Fay, Alexander Herausforderungen in der interdisziplinären Entwicklung von Cyber-Physischen Produktionssystemen Journal Article Automatisierungstechnik, 67 (6), pp. 445-454, 2019. @article{Vogel-HeuserFWB19, title = {Herausforderungen in der interdisziplinären Entwicklung von Cyber-Physischen Produktionssystemen}, author = {Birgit Vogel-Heuser and Cesare Fantuzzi and Manuel Wimmer and Markus Böhm and Alexander Fay }, doi = {10.1515/auto-2018-0144}, year = {2019}, date = {2019-07-04}, journal = {Automatisierungstechnik}, volume = {67}, number = {6}, pages = {445-454}, abstract = {Modellbasierte Systementwicklung hat bereits Anwendung in der industriellen Entwicklung einer Vielzahl technischer Systeme gefunden. Die Verwendung verschiedener Modelle, z. B. für mechanische, elektrotechnische und automatisierungstechnische Systemaspekte sowie deren Varianten und Versionen unterstützt interdisziplinäre Innovationen, führt jedoch zu vielen Herausforderungen. Eine davon ist die heterogene Modelllandschaft, die insbesondere von überlappenden, teilweise redundant modellierten Informationen geprägt ist. Zudem unterliegen Entwicklungs-, Produktions- und Serviceprozesse ständig internen sowie auch externen Entwicklungszyklen. Zur Bewältigung dieser Herausforderungen können verschiedene Methoden und Techniken eingesetzt werden. In diesem Beitrag werden einige dieser Ansätze hinsichtlich ihrer Vorteile und Grenzen untersucht, und zwar das Konsistenz- bzw. Inkonsistenzmanagement von gekoppelten Modellen im Engineering, das disziplin-übergreifende Management des Engineering-Workflows sowie die Bedeutung von Smart Data Ansätzen bzw. modellbasiertem Wissen.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Modellbasierte Systementwicklung hat bereits Anwendung in der industriellen Entwicklung einer Vielzahl technischer Systeme gefunden. Die Verwendung verschiedener Modelle, z. B. 
für mechanische, elektrotechnische und automatisierungstechnische Systemaspekte sowie deren Varianten und Versionen unterstützt interdisziplinäre Innovationen, führt jedoch zu vielen Herausforderungen. Eine davon ist die heterogene Modelllandschaft, die insbesondere von überlappenden, teilweise redundant modellierten Informationen geprägt ist. Zudem unterliegen Entwicklungs-, Produktions- und Serviceprozesse ständig internen sowie auch externen Entwicklungszyklen. Zur Bewältigung dieser Herausforderungen können verschiedene Methoden und Techniken eingesetzt werden. In diesem Beitrag werden einige dieser Ansätze hinsichtlich ihrer Vorteile und Grenzen untersucht, und zwar das Konsistenz- bzw. Inkonsistenzmanagement von gekoppelten Modellen im Engineering, das disziplin-übergreifende Management des Engineering-Workflows sowie die Bedeutung von Smart Data Ansätzen bzw. modellbasiertem Wissen. |
Wally, Bernhard; Huemer, Christian; Mazak, Alexandra; Wimmer, Manuel; Sindelar, Radek Modeling Variability and Persisting Configurations in OPC UA Journal Article Procedia CIRP, 81 , pp. 13-18, 2019, ISSN: 2212-8271. @article{Wally2019variability, title = {Modeling Variability and Persisting Configurations in OPC UA}, author = {Bernhard Wally and Christian Huemer and Alexandra Mazak and Manuel Wimmer and Radek Sindelar}, doi = {10.1016/j.procir.2019.03.003}, issn = {2212-8271}, year = {2019}, date = {2019-06-24}, journal = {Procedia CIRP}, volume = {81}, pages = {13-18}, abstract = {Variability is crucial in the design of many advanced goods and it is also receiving increasing attention in production systems engineering. Since OPC Unified Architecture plays an important role when it comes to standardized information exchange in modern production systems, it can be a melting pot for information from various engineering domains, such as product design and production engineering — thus, it is an ideal place to hold variability information of products and production systems alike. Based on an initial variability information model we propose additional concepts for the persisting of configurations.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Variability is crucial in the design of many advanced goods and it is also receiving increasing attention in production systems engineering. Since OPC Unified Architecture plays an important role when it comes to standardized information exchange in modern production systems, it can be a melting pot for information from various engineering domains, such as product design and production engineering — thus, it is an ideal place to hold variability information of products and production systems alike. Based on an initial variability information model we propose additional concepts for the persisting of configurations. |
Wolny, Sabine; Mazak-Huemer, Alexandra; Carpella, Christine; Geist, Verena; Wimmer, Manuel Thirteen years of SysML: a systematic mapping study Journal Article Software Systems Modeling, 19 (1), pp. 111-169, 2019. @article{WolnyMCGW20, title = {Thirteen years of SysML: a systematic mapping study}, author = {Sabine Wolny and Alexandra Mazak-Huemer and Christine Carpella and Verena Geist and Manuel Wimmer}, url = {https://doi.org/10.1007/s10270-019-00735-y}, doi = {10.1007/s10270-019-00735-y}, year = {2019}, date = {2019-05-13}, journal = {Software Systems Modeling}, volume = {19}, number = {1}, pages = {111-169}, abstract = {The OMG standard Systems Modeling Language (SysML) has been on the market for about thirteen years. This standard is an extended subset of UML providing a graphical modeling language for designing complex systems by considering software as well as hardware parts. Over the period of thirteen years, many publications have covered various aspects of SysML in different research fields. The aim of this paper is to conduct a systematic mapping study about SysML to identify the different categories of papers, (i) to get an overview of existing research topics and groups, (ii) to identify whether there are any publication trends, and (iii) to uncover possible missing links. We followed the guidelines for conducting a systematic mapping study by Petersen et al. (Inf Softw Technol 64:1–18, 2015) to analyze SysML publications from 2005 to 2017. 
Our analysis revealed the following main findings: (i) there is a growing scientific interest in SysML in the last years particularly in the research field of Software Engineering, (ii) SysML is mostly used in the design or validation phase, rather than in the implementation phase, (iii) the most commonly used diagram types are the SysML-specific requirement diagram, parametric diagram, and block diagram, together with the activity diagram and state machine diagram known from UML, (iv) SysML is a specific UML profile mostly used in systems engineering; however, the language has to be customized to accommodate domain-specific aspects, (v) related to collaborations for SysML research over the world, there are more individual research groups than large international networks. This study provides a solid basis for classifying existing approaches for SysML. Researchers can use our results (i) for identifying open research issues, (ii) for a better understanding of the state of the art, and (iii) as a reference for finding specific approaches about SysML.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The OMG standard Systems Modeling Language (SysML) has been on the market for about thirteen years. This standard is an extended subset of UML providing a graphical modeling language for designing complex systems by considering software as well as hardware parts. Over the period of thirteen years, many publications have covered various aspects of SysML in different research fields. The aim of this paper is to conduct a systematic mapping study about SysML to identify the different categories of papers, (i) to get an overview of existing research topics and groups, (ii) to identify whether there are any publication trends, and (iii) to uncover possible missing links. We followed the guidelines for conducting a systematic mapping study by Petersen et al. (Inf Softw Technol 64:1–18, 2015) to analyze SysML publications from 2005 to 2017. 
Our analysis revealed the following main findings: (i) there is a growing scientific interest in SysML in the last years particularly in the research field of Software Engineering, (ii) SysML is mostly used in the design or validation phase, rather than in the implementation phase, (iii) the most commonly used diagram types are the SysML-specific requirement diagram, parametric diagram, and block diagram, together with the activity diagram and state machine diagram known from UML, (iv) SysML is a specific UML profile mostly used in systems engineering; however, the language has to be customized to accommodate domain-specific aspects, (v) related to collaborations for SysML research over the world, there are more individual research groups than large international networks. This study provides a solid basis for classifying existing approaches for SysML. Researchers can use our results (i) for identifying open research issues, (ii) for a better understanding of the state of the art, and (iii) as a reference for finding specific approaches about SysML. |
Kessentini, Wael; Sahraoui, Houari; Wimmer, Manuel Automated metamodel/model co-evolution: A search-based approach Journal Article Information and Software Technology, 106 , pp. 49-67, 2019, ISSN: 0950-5849. @article{Kessentini2019ammmce, title = {Automated metamodel/model co-evolution: A search-based approach}, author = {Wael Kessentini and Houari Sahraoui and Manuel Wimmer}, doi = {10.1016/j.infsof.2018.09.003}, issn = {0950-5849}, year = {2019}, date = {2019-02-00}, journal = {Information and Software Technology}, volume = {106}, pages = {49-67}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Burgueño, Loli; Mayerhofer, Tanja; Wimmer, Manuel; Vallecillo, Antonio Specifying quantities in software models Journal Article Information and Software Technology, 113 , pp. 82-97, 2019, ISSN: 0950-5849. @article{Burgueno2019quantities, title = {Specifying quantities in software models}, author = {Loli Burgueño and Tanja Mayerhofer and Manuel Wimmer and Antonio Vallecillo}, doi = {10.1016/j.infsof.2019.05.006}, issn = {0950-5849}, year = {2019}, date = {2019-00-00}, journal = {Information and Software Technology}, volume = {113}, pages = {82-97}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Ameller, D; Franch, X; Gómez, C; Martínez-Fernández, S; Araujo, J; Biffl, S; Cabot, J; Cortellessa, V; Méndez Fernández, D; Moreira, A; Muccini, H; Vallecillo, A; Wimmer, Manuel; Amaral, V; Böhm, W; Brunelière, H; Burgueño, Loli; Goulão, M; Teufl, S; Berardinelli, L Dealing with Non-Functional Requirements in Model-Driven Development: A Survey Journal Article IEEE Transactions on Software Engineering, 2019. @article{Ameller2019nfr, title = {Dealing with Non-Functional Requirements in Model-Driven Development: A Survey}, author = {D. Ameller and X. Franch and C. Gómez and S. Martínez-Fernández and J. Araujo and S. Biffl and J. Cabot and V. Cortellessa and D. Méndez Fernández and A. Moreira and H. Muccini and A. Vallecillo and Manuel Wimmer and V. Amaral and W. Böhm and H. Brunelière and Loli Burgueño and M. Goulão and S. Teufl and L. Berardinelli}, doi = {10.1109/TSE.2019.2904476}, year = {2019}, date = {2019-00-00}, journal = {IEEE Transactions on Software Engineering}, abstract = {Managing Non-Functional Requirements (NFRs) in software projects is challenging, and projects that adopt Model-Driven Development (MDD) are no exception. Although several methods and techniques have been proposed to face this challenge, there is still little evidence on how NFRs are handled in MDD by practitioners. Knowing more about the state of the practice may help researchers to steer their research and practitioners to improve their daily work. Objective: In this paper, we present our findings from an interview-based survey conducted with practitioners working in 18 different companies from 6 European countries. From a practitioner's point of view, the paper shows what barriers and benefits the management of NFRs as part of the MDD process can bring to companies, how NFRs are supported by MDD approaches, and which strategies are followed when (some) types of NFRs are not supported by MDD approaches. 
Results: Our study shows that practitioners perceive MDD adoption as a complex process with little to no tool support for NFRs, reporting productivity and maintainability as the types of NFRs expected to be supported when MDD is adopted. But in general, companies adapt MDD to deal with NFRs. When NFRs are not supported, the generated code is sometimes changed manually, thus compromising the maintainability of the software developed. However, the interviewed practitioners claim that the benefits of using MDD outweigh the extra effort required by these manual adaptations. Conclusion: Overall, the results indicate that it is important for practitioners to handle NFRs in MDD, but further research is necessary in order to lower the barrier for supporting a broad spectrum of NFRs with MDD. Still, much conceptual and tool implementation work seems to be necessary to lower the barrier of integrating the broad spectrum of NFRs in practice.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Managing Non-Functional Requirements (NFRs) in software projects is challenging, and projects that adopt Model-Driven Development (MDD) are no exception. Although several methods and techniques have been proposed to face this challenge, there is still little evidence on how NFRs are handled in MDD by practitioners. Knowing more about the state of the practice may help researchers to steer their research and practitioners to improve their daily work. Objective: In this paper, we present our findings from an interview-based survey conducted with practitioners working in 18 different companies from 6 European countries. From a practitioner's point of view, the paper shows what barriers and benefits the management of NFRs as part of the MDD process can bring to companies, how NFRs are supported by MDD approaches, and which strategies are followed when (some) types of NFRs are not supported by MDD approaches. 
Results: Our study shows that practitioners perceive MDD adoption as a complex process with little to no tool support for NFRs, reporting productivity and maintainability as the types of NFRs expected to be supported when MDD is adopted. But in general, companies adapt MDD to deal with NFRs. When NFRs are not supported, the generated code is sometimes changed manually, thus compromising the maintainability of the software developed. However, the interviewed practitioners claim that the benefits of using MDD outweigh the extra effort required by these manual adaptations. Conclusion: Overall, the results indicate that it is important for practitioners to handle NFRs in MDD, but further research is necessary in order to lower the barrier for supporting a broad spectrum of NFRs with MDD. Still, much conceptual and tool implementation work seems to be necessary to lower the barrier of integrating the broad spectrum of NFRs in practice. |
2018 |
Ceravolo, Paolo; Azzini, Antonia; Angelini, Marco; Catarci, Tiziana; Cudré-Mauroux, Philippe; Damiani, Ernesto; Mazak, Alexandra; Keulen, Maurice Van; Jarrar, Mustafa; Santucci, Giuseppe; Sattler, Kai-Uwe; Scannapieco, Monica; Wimmer, Manuel; Wrembel, Robert; Zaraket, Fadi Big Data Semantics Journal Article Journal on Data Semantics, 2018, ISSN: 1861-2040. @article{Ceravolo2018bds, title = {Big Data Semantics}, author = {Paolo Ceravolo and Antonia Azzini and Marco Angelini and Tiziana Catarci and Philippe Cudré-Mauroux and Ernesto Damiani and Alexandra Mazak and Maurice Van Keulen and Mustafa Jarrar and Giuseppe Santucci and Kai-Uwe Sattler and Monica Scannapieco and Manuel Wimmer and Robert Wrembel and Fadi Zaraket}, doi = {10.1007/s13740-018-0086-2}, issn = {1861-2040}, year = {2018}, date = {2018-05-23}, journal = {Journal on Data Semantics}, abstract = {Big Data technology has discarded traditional data modeling approaches as no longer applicable to distributed data processing. It is, however, largely recognized that Big Data impose novel challenges in data and infrastructure management. Indeed, multiple components and procedures must be coordinated to ensure a high level of data quality and accessibility for the application layers, e.g., data analytics and reporting. In this paper, the third of its kind co-authored by members of IFIP WG 2.6 on Data Semantics, we propose a review of the literature addressing these topics and discuss relevant challenges for future research. Based on our literature review, we argue that methods, principles, and perspectives developed by the Data Semantics community can significantly contribute to address Big Data challenges.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Big Data technology has discarded traditional data modeling approaches as no longer applicable to distributed data processing. It is, however, largely recognized that Big Data impose novel challenges in data and infrastructure management. 
Indeed, multiple components and procedures must be coordinated to ensure a high level of data quality and accessibility for the application layers, e.g., data analytics and reporting. In this paper, the third of its kind co-authored by members of IFIP WG 2.6 on Data Semantics, we propose a review of the literature addressing these topics and discuss relevant challenges for future research. Based on our literature review, we argue that methods, principles, and perspectives developed by the Data Semantics community can significantly contribute to address Big Data challenges. |
Wolny, Sabine; Mazak, Alexandra; Wimmer, Manuel; Konlechner, Rafael; Kappel, Gerti Model-Driven Time-Series Analytics Journal Article International Journal of Conceptual Modeling, 13 , pp. 252-261, 2018. @article{Wolny2018mdtsa, title = {Model-Driven Time-Series Analytics}, author = {Sabine Wolny and Alexandra Mazak and Manuel Wimmer and Rafael Konlechner and Gerti Kappel}, url = {https://cdl-mint.se.jku.at/case-study-artefacts-for-emisa-2017/}, year = {2018}, date = {2018-02-27}, journal = {International Journal of Conceptual Modeling}, volume = {13}, pages = {252-261}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Mazak, Alexandra; Lueder, Arndt; Wolny, Sabine; Wimmer, Manuel; Winkler, Dietmar; Rosendahl, Ronald; Bayanifar, H; Biffl, S Model-Based Generation of Run-Time Data Collection Systems Exploiting AutomationML Journal Article at - Automatisierungstechnik, 66 , pp. 819-833, 2018. @article{Mazak2018mbg, title = {Model-Based Generation of Run-Time Data Collection Systems Exploiting AutomationML}, author = {Alexandra Mazak and Arndt Lueder and Sabine Wolny and Manuel Wimmer and Dietmar Winkler and Ronald Rosendahl and H. Bayanifar and S. Biffl}, year = {2018}, date = {2018-00-00}, journal = {at - Automatisierungstechnik}, volume = {66}, pages = {819-833}, abstract = {Production system operators need support for collecting and pre-processing data on production systems consisting of several system components, as foundation for optimization and defect detection. Traditional approaches based on hard-coded programming of such runtime data collection systems take time and effort, and require both domain and technology knowledge. In this article, we introduce the AML-RTDC approach, which combines the strengths of AutomationML (AML) data modeling and model-driven engineering, to reduce the manual effort for realizing the run-time data collection (RTDC) system. We evaluate the feasibility of the AML-RTDC approach with a demonstration case about a lab-sized production system and a use case based on real-world requirements.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Production system operators need support for collecting and pre-processing data on production systems consisting of several system components, as foundation for optimization and defect detection. Traditional approaches based on hard-coded programming of such runtime data collection systems take time and effort, and require both domain and technology knowledge. 
In this article, we introduce the AML-RTDC approach, which combines the strengths of AutomationML (AML) data modeling and model-driven engineering, to reduce the manual effort for realizing the run-time data collection (RTDC) system. We evaluate the feasibility of the AML-RTDC approach with a demonstration case about a lab-sized production system and a use case based on real-world requirements. |
Mora Segura, Ángel; de Lara, Juan; Neubauer, Patrick; Wimmer, Manuel Automated modelling assistance by integrating heterogeneous information sources Journal Article Computer Languages, Systems & Structures, 53 , pp. 90-120, 2018, ISSN: 1477-8424. @article{Angel2018, title = {Automated modelling assistance by integrating heterogeneous information sources}, author = {Mora Segura, Ángel and Juan de Lara and Patrick Neubauer and Manuel Wimmer}, doi = {10.1016/j.cl.2018.02.002}, issn = {1477-8424}, year = {2018}, date = {2018-00-00}, journal = {Computer Languages, Systems & Structures}, volume = {53}, pages = {90-120}, abstract = {Model-Driven Engineering (MDE) uses models as its main assets in the software development process. The structure of a model is described through a meta-model. Even though modelling and meta-modelling are recurrent activities in MDE and a vast amount of MDE tools exist nowadays, they are tasks typically performed in an unassisted way. Usually, these tools cannot extract useful knowledge available in heterogeneous information sources like XML, RDF, CSV or other models and meta-models. We propose an approach to provide modelling and meta-modelling assistance. The approach gathers heterogeneous information sources in various technological spaces, and represents them uniformly in a common data model. This enables their uniform querying, by means of an extensible mechanism, which can make use of services, e.g., for synonym search and word sense analysis. The query results can then be easily incorporated into the (meta-)model being built. The approach has been realized in the Extremo tool, developed as an Eclipse plugin. Extremo has been validated in the context of two domains – production systems and process modelling – taking into account a large and complex industrial standard for classification and product description. 
Further validation results indicate that the integration of Extremo in various modelling environments can be achieved with low effort, and that the tool is able to handle information from most existing technological spaces.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Model-Driven Engineering (MDE) uses models as its main assets in the software development process. The structure of a model is described through a meta-model. Even though modelling and meta-modelling are recurrent activities in MDE and a vast amount of MDE tools exist nowadays, they are tasks typically performed in an unassisted way. Usually, these tools cannot extract useful knowledge available in heterogeneous information sources like XML, RDF, CSV or other models and meta-models. We propose an approach to provide modelling and meta-modelling assistance. The approach gathers heterogeneous information sources in various technological spaces, and represents them uniformly in a common data model. This enables their uniform querying, by means of an extensible mechanism, which can make use of services, e.g., for synonym search and word sense analysis. The query results can then be easily incorporated into the (meta-)model being built. The approach has been realized in the Extremo tool, developed as an Eclipse plugin. Extremo has been validated in the context of two domains – production systems and process modelling – taking into account a large and complex industrial standard for classification and product description. Further validation results indicate that the integration of Extremo in various modelling environments can be achieved with low effort, and that the tool is able to handle information from most existing technological spaces. |
2017 |
Bergmayr, Alexander; Breitenbücher, Uwe; Ferry, Nicolas; Rossini, Alessandro; Solberg, Arnor; Wimmer, Manuel; Kappel, Gerti; Leymann, Frank A Systematic Review of Cloud Modeling Languages Journal Article ACM Computing Surveys, pp. 1-39, 2017. @article{Leymann01.1, title = {A Systematic Review of Cloud Modeling Languages}, author = {Alexander Bergmayr and Uwe Breitenbücher and Nicolas Ferry and Alessandro Rossini and Arnor Solberg and Manuel Wimmer and Gerti Kappel and Frank Leymann}, year = {2017}, date = {2017-11-20}, journal = {ACM Computing Surveys}, pages = {1-39}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Bill, Robert; Fleck, Martin; Troya, Javier; Mayerhofer, Tanja; Wimmer, Manuel A Local and Global Tour on MOMoT Journal Article Software and Systems Modeling, 2017. @article{Wimmer11.1, title = {A Local and Global Tour on MOMoT}, author = {Robert Bill and Martin Fleck and Javier Troya and Tanja Mayerhofer and Manuel Wimmer}, year = {2017}, date = {2017-11-20}, journal = {Software and Systems Modeling}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Bruneliere, Hugo; Burger, Erik; Cabot, Jordi; Wimmer, Manuel A feature-based survey of model view approaches Journal Article Software and Systems Modeling, pp. 1-22, 2017. @article{Wimmer2017, title = {A feature-based survey of model view approaches}, author = {Hugo Bruneliere and Erik Burger and Jordi Cabot and Manuel Wimmer}, doi = {10.1007/s10270-017-0622-9}, year = {2017}, date = {2017-09-15}, journal = {Software and Systems Modeling}, pages = {1-22}, abstract = {When dealing with complex systems, information is very often fragmented across many different models expressed within a variety of (modeling) languages. To provide the relevant information in an appropriate way to different kinds of stakeholders, (parts of) such models have to be combined and potentially revamped by focusing on concerns of particular interest for them. Thus, mechanisms to define and compute views over models are highly needed. Several approaches have already been proposed to provide (semi)automated support for dealing with such model views. This paper provides a detailed overview of the current state of the art in this area. To achieve this, we relied on our own experiences of designing and applying such solutions in order to conduct a literature review on this topic. As a result, we discuss the main capabilities of existing approaches and propose a corresponding research agenda. We notably contribute a feature model describing what we believe to be the most important characteristics of the support for views on models. We expect this work to be helpful to both current and potential future users and developers of model view techniques, as well as to any person generally interested in model-based software and systems engineering.}, keywords = {}, pubstate = {published}, tppubtype = {article} } When dealing with complex systems, information is very often fragmented across many different models expressed within a variety of (modeling) languages. 
To provide the relevant information in an appropriate way to different kinds of stakeholders, (parts of) such models have to be combined and potentially revamped by focusing on concerns of particular interest for them. Thus, mechanisms to define and compute views over models are highly needed. Several approaches have already been proposed to provide (semi)automated support for dealing with such model views. This paper provides a detailed overview of the current state of the art in this area. To achieve this, we relied on our own experiences of designing and applying such solutions in order to conduct a literature review on this topic. As a result, we discuss the main capabilities of existing approaches and propose a corresponding research agenda. We notably contribute a feature model describing what we believe to be the most important characteristics of the support for views on models. We expect this work to be helpful to both current and potential future users and developers of model view techniques, as well as to any person generally interested in model-based software and systems engineering. |
Mansoor, Usman; Kessentini, Marouane; Wimmer, Manuel; Deb, Kalyanmoy Multi-view refactoring of class and activity diagrams using a multi-objective evolutionary algorithm Journal Article Software Quality Journal, 25 (2), pp. 473-501, 2017, ISSN: 1573-1367. @article{Mansoor2017mvr, title = {Multi-view refactoring of class and activity diagrams using a multi-objective evolutionary algorithm}, author = {Usman Mansoor and Marouane Kessentini and Manuel Wimmer and Kalyanmoy Deb}, doi = {10.1007/s11219-015-9284-4}, issn = {1573-1367}, year = {2017}, date = {2017-06-01}, journal = {Software Quality Journal}, volume = {25}, number = {2}, pages = {473-501}, abstract = {To improve the quality of software systems, one of the widely used techniques is refactoring defined as the process of improving the design of an existing system by changing its internal structure without altering the external behavior. The majority of existing refactoring work focuses mainly on the source code level. The suggestion of refactorings at the model level is more challenging due to the difficulty to evaluate: (a) the impact of the suggested refactorings applied to a diagram on other related diagrams to improve the overall system quality, (b) their feasibility, and (c) interdiagram consistency. We propose, in this paper, a novel framework that enables software designers to apply refactoring at the model level. To this end, we used a multi-objective evolutionary algorithm to find a trade-off between improving the quality of class and activity diagrams. The proposed multi-objective approach provides a multi-view for software designers to evaluate the impact of suggested refactorings applied to class diagrams on related activity diagrams in order to evaluate the overall quality, and check their feasibility and behavior preservation. 
The statistical evaluation performed on models extracted from four open-source systems confirms the efficiency of our approach.}, keywords = {}, pubstate = {published}, tppubtype = {article} } To improve the quality of software systems, one of the widely used techniques is refactoring defined as the process of improving the design of an existing system by changing its internal structure without altering the external behavior. The majority of existing refactoring work focuses mainly on the source code level. The suggestion of refactorings at the model level is more challenging due to the difficulty to evaluate: (a) the impact of the suggested refactorings applied to a diagram on other related diagrams to improve the overall system quality, (b) their feasibility, and (c) interdiagram consistency. We propose, in this paper, a novel framework that enables software designers to apply refactoring at the model level. To this end, we used a multi-objective evolutionary algorithm to find a trade-off between improving the quality of class and activity diagrams. The proposed multi-objective approach provides a multi-view for software designers to evaluate the impact of suggested refactorings applied to class diagrams on related activity diagrams in order to evaluate the overall quality, and check their feasibility and behavior preservation. The statistical evaluation performed on models extracted from four open-source systems confirms the efficiency of our approach. |
Kessentini, Marouane; Wimmer, Manuel Guest Editorial Special Issue on Computational Intelligence for Software Engineering and Services Computing Journal Article IEEE Transactions on Emerging Topics in Computational Intelligence, 1 (3), pp. 143-144, 2017, ISSN: 2471-285X. @article{Kessentini2017gesi, title = {Guest Editorial Special Issue on Computational Intelligence for Software Engineering and Services Computing}, author = {Marouane Kessentini and Manuel Wimmer}, doi = {10.1109/TETCI.2017.2700659}, issn = {2471-285X}, year = {2017}, date = {2017-05-29}, booktitle = {IEEE Trans. Emerging Topics in Comput. Intellig.}, journal = {IEEE Transactions on Emerging Topics in Computational Intelligence}, volume = {1}, number = {3}, pages = {143-144}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Bousse, Erwan; Leroy, Dorian; Combemale, Benoit; Wimmer, Manuel; Baudry, Benoit Omniscient debugging for executable DSLs Journal Article Journal of Systems and Software, 137 , pp. 261-288, 2017, ISSN: 0164-1212. @article{Bousse2017od, title = {Omniscient debugging for executable DSLs}, author = {Erwan Bousse and Dorian Leroy and Benoit Combemale and Manuel Wimmer and Benoit Baudry}, doi = {10.1016/j.jss.2017.11.025}, issn = {0164-1212}, year = {2017}, date = {2017-01-01}, journal = {Journal of Systems and Software}, volume = {137}, pages = {261-288}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Mayerhofer, Tanja; Wimmer, Manuel; Berardinelli, Luca; Drath, Rainer A Model-Driven Engineering Workbench for CAEX Supporting Language Customization and Evolution Journal Article IEEE Transactions on Industrial Informatics, 2017, ISSN: 1551-3203. @article{Mayerhofer2017mdew, title = {A Model-Driven Engineering Workbench for CAEX Supporting Language Customization and Evolution}, author = {Tanja Mayerhofer and Manuel Wimmer and Luca Berardinelli and Rainer Drath}, doi = {10.1109/TII.2017.2786780}, issn = {1551-3203}, year = {2017}, date = {2017-01-01}, journal = {IEEE Transactions on Industrial Informatics}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Inproceedings
2021 |
Leroy, Dorian; Bousse, Erwan; Wimmer, Manuel; Combemale, Benoit; Mayerhofer, Tanja; Schwinger, Wieland Behavioral Interfaces for Executable DSLs Inproceedings Koziolek, Anne; Schaefer, Ina; Seidl, Christoph (Ed.): Software Engineering 2021, Fachtagung des GI-Fachbereichs Softwaretechnik, Braunschweig, Deutschland, virtuell, 22.-26. Februar 2021., pp. 133-134, Gesellschaft für Informatik e.V., 2021, ISBN: 978-3-88579-704-3. @inproceedings{wimmer2021, title = {Behavioral Interfaces for Executable DSLs}, author = {Dorian Leroy and Erwan Bousse and Manuel Wimmer and Benoit Combemale and Tanja Mayerhofer and Wieland Schwinger }, editor = {Anne Koziolek and Ina Schaefer and Christoph Seidl}, doi = {10.18420/SE2021_25}, isbn = {978-3-88579-704-3}, year = {2021}, date = {2021-02-18}, booktitle = {Software Engineering 2021, Fachtagung des GI-Fachbereichs Softwaretechnik, Braunschweig, Deutschland, virtuell, 22.-26. Februar 2021.}, volume = {P310}, pages = {133-134}, publisher = {Gesellschaft für Informatik e.V.}, abstract = {A large amount of domain-specific languages (DSLs) are used to represent behavioral aspects of systems in the form of behavioral models [BCW17]. Executable domain-specific languages (xDSLs) enable the execution of behavioral models [Ma13]. While an execution is mostly driven by the model’s content (e.g., control structures, conditionals, transitions,method calls), many use cases require interacting with the running model, such as simulating scenarios in an automated or interactive way or coupling system models with environment models. The management of these interactions is usually hard-coded into the semantics of xDSLs, which prevents its reuse for other xDSLs and the provision of generic interaction tools. To tackle these issues, we propose a novel metalanguage for complementing the definition ofxDSLs with explicit behavioral interfaces to enable external tools to interact with executable models in a unified way. 
A behavioral interface defines a set of events specifying how external tools can interact with models that conform to xDSLs implementing the interface. Additionally, we define two types of relationships involving behavioral interfaces: the implementation relationship and the subtyping relationship. An implementation relationship ties a behavioral interface to a given operational semantics implementation. Subtyping relationships allow to build event abstraction hierarchies, indicating that events from one interface can be abstracted or refined as events from another interface.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } A large amount of domain-specific languages (DSLs) are used to represent behavioral aspects of systems in the form of behavioral models [BCW17]. Executable domain-specific languages (xDSLs) enable the execution of behavioral models [Ma13]. While an execution is mostly driven by the model’s content (e.g., control structures, conditionals, transitions, method calls), many use cases require interacting with the running model, such as simulating scenarios in an automated or interactive way or coupling system models with environment models. The management of these interactions is usually hard-coded into the semantics of xDSLs, which prevents its reuse for other xDSLs and the provision of generic interaction tools. To tackle these issues, we propose a novel metalanguage for complementing the definition of xDSLs with explicit behavioral interfaces to enable external tools to interact with executable models in a unified way. A behavioral interface defines a set of events specifying how external tools can interact with models that conform to xDSLs implementing the interface. Additionally, we define two types of relationships involving behavioral interfaces: the implementation relationship and the subtyping relationship. An implementation relationship ties a behavioral interface to a given operational semantics implementation. 
Subtyping relationships allow to build event abstraction hierarchies, indicating that events from one interface can be abstracted or refined as events from another interface. |
Rabiser, Rick; Vogel-Heuser, Birgit; Wimmer, Manuel; Zoitl, Alois Workshop on Software Engineering in Cyber-Physical Production Systems (SECPPS’21) Inproceedings Koziolek, Anne; Schaefer, Ina; Seidl, Christoph (Ed.): Workshop on Software Engineering in Cyber-Physical Production Systems (SECPPS’21), Software Engineering 2021. Bonn, Germany., pp. 133-134, Gesellschaft für Informatik e.V., 2021. @inproceedings{wimmer2021c, title = {Workshop on Software Engineering in Cyber-Physical Production Systems (SECPPS’21)}, author = {Rick Rabiser and Birgit Vogel-Heuser and Manuel Wimmer and Alois Zoitl}, editor = {Anne Koziolek and Ina Schaefer and Christoph Seidl}, doi = {10.18420/SE2021_53}, year = {2021}, date = {2021-02-18}, booktitle = {Workshop on Software Engineering in Cyber-Physical Production Systems (SECPPS’21), Software Engineering 2021. Bonn, Germany.}, volume = {P310}, pages = {133-134}, publisher = {Gesellschaft für Informatik e.V.}, abstract = {This workshop focuses on Software Engineering in Cyber-Physical Production Systems. It is an interactive workshop opened by keynotes and statements by participants, followed by extensive discussions in break-out groups. The output of the workshop is a research roadmap as well as concrete networking activities to further establish a community in this interdisciplinary field.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } This workshop focuses on Software Engineering in Cyber-Physical Production Systems. It is an interactive workshop opened by keynotes and statements by participants, followed by extensive discussions in break-out groups. The output of the workshop is a research roadmap as well as concrete networking activities to further establish a community in this interdisciplinary field. |
Wimmer, Manuel From Model Versioning to Variability-Augmented Modelling Technologies Inproceedings VAMOS 2021, 15th International Working Conference on Variability Modelling of Software-Intensive Systems, Krems, Austria, virtuell, February 9–11, 2021, pp. 2:1, ACM, 2021. @inproceedings{Wimmer21, title = {From Model Versioning to Variability-Augmented Modelling Technologies}, author = {Manuel Wimmer}, doi = {10.1145/3442391.3442394}, year = {2021}, date = {2021-02-12}, booktitle = {VAMOS 2021, 15th International Working Conference on Variability Modelling of Software-Intensive Systems, Krems, Austria, virtuell, February 9–11, 2021}, pages = {2:1}, publisher = {ACM}, abstract = {Version control systems are an essential part of the software development infrastructure. While traditional systems mostly focus on code-based artefacts, recent trends such as Cyber Physical Systems (CPS) require to support model-based artefacts as well – especially in interdisciplinary settings. As a consequence, several dedicated approaches for model versioning have been proposed recently. In this talk, I will review the active research field of model versioning, establish a common terminology, introduce the various techniques and technologies applied in current model versioning systems, and conclude with open issues and challenges such as the need for variability-augmented modelling technologies.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Version control systems are an essential part of the software development infrastructure. While traditional systems mostly focus on code-based artefacts, recent trends such as Cyber Physical Systems (CPS) require to support model-based artefacts as well – especially in interdisciplinary settings. As a consequence, several dedicated approaches for model versioning have been proposed recently. 
In this talk, I will review the active research field of model versioning, establish a common terminology, introduce the various techniques and technologies applied in current model versioning systems, and conclude with open issues and challenges such as the need for variability-augmented modelling technologies. |
2020 |
Garmendia, Antonio; Wimmer, Manuel; Guerra, Esther; Gómez-Martínez, Elena; de Lara, Juan Automated Variability Injection for Graphical Modelling Languages Inproceedings in Proceedings of the 19th ACM SIGPLAN International Conference on Generative Programming, Concepts and Experiences (GPCE 2020), November 16–17, 2020, Chicago, USA, Virtual, pp. 15-21, ACM, 2020. @inproceedings{wimmer2020c, title = {Automated Variability Injection for Graphical Modelling Languages}, author = {Antonio Garmendia and Manuel Wimmer and Esther Guerra and Elena Gómez-Martínez and Juan de Lara}, url = {https://doi.org/10.1145/3425898.3426957}, doi = {10.1145/3425898.3426957}, year = {2020}, date = {2020-12-15}, booktitle = {in Proceedings of the 19th ACM SIGPLAN International Conference on Generative Programming, Concepts and Experiences (GPCE 2020), November 16–17, 2020, Chicago, USA, Virtual}, pages = {15-21}, publisher = {ACM}, abstract = {Model-based development approaches, such as Model-Driven Engineering (MDE), heavily rely on the use of modelling languages to achieve and automate software development tasks. To enable the definition of model variants (e.g., supporting the compact description of system families), one solution is to combine MDE with Software Product Lines. However, this is technically costly as it requires adapting many MDE artefacts associated to the modelling language – especially the meta-models and graphical environments. To alleviate this situation, we propose a method for the automated injection of variability into graphical modelling languages. Given the meta-model and graphical environment of a particular language, our approach permits configuring the allowed model variability, and the graphical environment is automatically adapted to enable creating models with variability. 
Our solution is implemented atop the Eclipse Modeling Framework and Sirius, and synthesizes adapted graphical editors integrated with Feature IDE.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Model-based development approaches, such as Model-Driven Engineering (MDE), heavily rely on the use of modelling languages to achieve and automate software development tasks. To enable the definition of model variants (e.g., supporting the compact description of system families), one solution is to combine MDE with Software Product Lines. However, this is technically costly as it requires adapting many MDE artefacts associated to the modelling language – especially the meta-models and graphical environments. To alleviate this situation, we propose a method for the automated injection of variability into graphical modelling languages. Given the meta-model and graphical environment of a particular language, our approach permits configuring the allowed model variability, and the graphical environment is automatically adapted to enable creating models with variability. Our solution is implemented atop the Eclipse Modeling Framework and Sirius, and synthesizes adapted graphical editors integrated with Feature IDE. |
Bork, Dominik; Garmendia, Antonio; Wimmer, Manuel Towards a Multi-Objective Modularization Approach for Entity-Relationship Models Inproceedings Michael, Judith; Torres, Victoria (Ed.): Forum, Demo and Posters 2020 co-located with 39th International Conference on Conceptual Modeling ER 2020, Vienna, Austria, November 3-6, 2020, pp. 45-58, CEUR-WS.org, 2020. @inproceedings{BorkGW20, title = {Towards a Multi-Objective Modularization Approach for Entity-Relationship Models}, author = {Dominik Bork and Antonio Garmendia and Manuel Wimmer}, editor = {Judith Michael and Victoria Torres}, url = {http://ceur-ws.org/Vol-2716/paper4.pdf}, year = {2020}, date = {2020-11-05}, booktitle = {Forum, Demo and Posters 2020 co-located with 39th International Conference on Conceptual Modeling ER 2020, Vienna, Austria, November 3-6, 2020}, volume = {2716}, pages = {45-58}, publisher = {CEUR-WS.org}, abstract = {Legacy systems and their associated data models often evolve into large, monolithic artifacts. This threatens comprehensibility and maintainability by human beings. Breaking down a monolith into a modular structure is an established technique in software engineering. Several previous works aimed to adapt modularization also for conceptual data models. However, we currently see a research gap manifested in the absence of:(i)a flexible and extensible modularization concept for Entity Relationship (ER) models; (ii )of openly available tool support; and (iii) empirical evaluation. With this paper, we introduce a generic encoding of a modularization concept for ER models which enables the use of meta-heuristic search approaches. For the efficient application we introduce the ModulER tool. Eventually, we report on a twofold evaluation: First, we demonstrate feasibility and performance of the approach by two demonstration cases. 
Second, we report on an initial empirical experiment and a survey we conducted with modelers to compare automated modularizations with manually created ones and to better understand how humans approach ER modularization.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Legacy systems and their associated data models often evolve into large, monolithic artifacts. This threatens comprehensibility and maintainability by human beings. Breaking down a monolith into a modular structure is an established technique in software engineering. Several previous works aimed to adapt modularization also for conceptual data models. However, we currently see a research gap manifested in the absence of:(i)a flexible and extensible modularization concept for Entity Relationship (ER) models; (ii )of openly available tool support; and (iii) empirical evaluation. With this paper, we introduce a generic encoding of a modularization concept for ER models which enables the use of meta-heuristic search approaches. For the efficient application we introduce the ModulER tool. Eventually, we report on a twofold evaluation: First, we demonstrate feasibility and performance of the approach by two demonstration cases. Second, we report on an initial empirical experiment and a survey we conducted with modelers to compare automated modularizations with manually created ones and to better understand how humans approach ER modularization. |
Horváth, Benedek; Horváth, Ákos; Wimmer, Manuel Towards the next generation of reactive model transformations on low-code platforms: three research lines Inproceedings Guerra, Esther; Iovino, Ludovico (Ed.): 23rd International Conference on Model Driven Engineering Languages and Systems, Virtual Event, Canada, 16-23 October, 2020, Companion Proceedings, pp. 65:1-65:10, ACM, 2020. @inproceedings{Horvath0W20, title = {Towards the next generation of reactive model transformations on low-code platforms: three research lines}, author = {Benedek Horváth and Ákos Horváth and Manuel Wimmer }, editor = {Esther Guerra and Ludovico Iovino}, url = {https://doi.org/10.1145/3417990.3420199}, doi = {10.1145/3417990.3420199}, year = {2020}, date = {2020-10-29}, booktitle = {23rd International Conference on Model Driven Engineering Languages and Systems, Virtual Event, Canada, 16-23 October, 2020, Companion Proceedings}, pages = {65:1-65:10}, publisher = {ACM}, abstract = {Low-Code Development Platforms have emerged as the next-generation, cloud-enabled collaborative platforms. These platforms adopt the principles of Model-Driven Engineering, where models are used as first-class citizens to build complex systems, and model transformations are employed to keep a consistent view between the different aspects of them. Due to the online nature of low-code platforms, users expect them to be responsive, to complete complex operations in a short time. 
To support such complex collaboration scenarios, the next-generation of low-code platforms must (i) offer a multi-tenant environment to manage the collaborative work of engineers, (ii) provide a model processing paradigm scaling up to hundreds of millions of elements, and (iii) provide engineers a set of selection criteria to choose the right model transformation engine in multi-tenant execution environments. In this paper, we outline three research lines to improve the performance of reactive model transformations on low-code platforms, by motivating our research with a case study from a systems engineering domain.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Low-Code Development Platforms have emerged as the next-generation, cloud-enabled collaborative platforms. These platforms adopt the principles of Model-Driven Engineering, where models are used as first-class citizens to build complex systems, and model transformations are employed to keep a consistent view between the different aspects of them. Due to the online nature of low-code platforms, users expect them to be responsive, to complete complex operations in a short time. To support such complex collaboration scenarios, the next-generation of low-code platforms must (i) offer a multi-tenant environment to manage the collaborative work of engineers, (ii) provide a model processing paradigm scaling up to hundreds of millions of elements, and (iii) provide engineers a set of selection criteria to choose the right model transformation engine in multi-tenant execution environments. In this paper, we outline three research lines to improve the performance of reactive model transformations on low-code platforms, by motivating our research with a case study from a systems engineering domain. |
Colantoni, Alessandro; Berardinelli, Luca; Wimmer, Manuel DevOpsML: Towards Modeling DevOps Processes and Platforms Inproceedings Guerra, Esther; Iovino, Ludovico (Ed.): 23rd International Conference on Model Driven Engineering Languages and Systems, Virtual Event, Canada, 16-23 October, 2020, Companion Proceedings, pp. 69:1 - 69:10, ACM, 2020. @inproceedings{ColantoniBW20, title = {DevOpsML: Towards Modeling DevOps Processes and Platforms}, author = {Alessandro Colantoni and Luca Berardinelli and Manuel Wimmer}, editor = {Esther Guerra and Ludovico Iovino}, url = {https://doi.org/10.1145/3417990.3420203}, doi = {10.1145/3417990.3420203}, year = {2020}, date = {2020-10-29}, booktitle = {23rd International Conference on Model Driven Engineering Languages and Systems, Virtual Event, Canada, 16-23 October, 2020, Companion Proceedings}, pages = {69:1 - 69:10}, publisher = {ACM}, abstract = {DevOps and Model Driven Engineering (MDE) provide differently skilled IT stakeholders with methodologies and tools for organizing and automating continuous software engineering activities–from development to operations, and using models as key engineering artifacts, respectively. Both DevOps and MDE aim at shortening the development life-cycle, dealing with complexity, and improve software process and product quality. The integration of DevOps and MDE principles and practices in low-code engineering platforms (LCEP) are gaining attention by the research community. However, at the same time, new requirements are upcoming for DevOps and MDE as LCEPs are often used by non-technical users, to deliver fully functional software. This is in particular challenging for current DevOps processes, which are mostly considered on the technological level, and thus, excluding most of the current LCEP users. The systematic use of models and modeling to lowering the learning curve of DevOps processes and platforms seems beneficial to make them also accessible for non-technical users. 
In this paper, we introduce DevOpsML, a conceptual framework for modeling and combining DevOps processes and platforms. Tools along with their interfaces and capabilities are the building blocks of DevOps platform configurations, which can be mapped to software engineering processes of arbitrary complexity. We show our initial endeavors on DevOpsML and present a research roadmap how to employ the resulting DevOpsML framework for different use cases. }, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } DevOps and Model Driven Engineering (MDE) provide differently skilled IT stakeholders with methodologies and tools for organizing and automating continuous software engineering activities–from development to operations, and using models as key engineering artifacts, respectively. Both DevOps and MDE aim at shortening the development life-cycle, dealing with complexity, and improve software process and product quality. The integration of DevOps and MDE principles and practices in low-code engineering platforms (LCEP) are gaining attention by the research community. However, at the same time, new requirements are upcoming for DevOps and MDE as LCEPs are often used by non-technical users, to deliver fully functional software. This is in particular challenging for current DevOps processes, which are mostly considered on the technological level, and thus, excluding most of the current LCEP users. The systematic use of models and modeling to lowering the learning curve of DevOps processes and platforms seems beneficial to make them also accessible for non-technical users. In this paper, we introduce DevOpsML, a conceptual framework for modeling and combining DevOps processes and platforms. Tools along with their interfaces and capabilities are the building blocks of DevOps platform configurations, which can be mapped to software engineering processes of arbitrary complexity. 
We show our initial endeavors on DevOpsML and present a research roadmap how to employ the resulting DevOpsML framework for different use cases. |
Bordeleau, Francis; Combemale, Benoit; Eramo, Romina; van den Brand, Mark; Wimmer, Manuel Towards Model-Driven Digital Twin Engineering: Current Opportunities and Future Challenges Inproceedings Babur, Önder; Enil, Joachim; Vogel-Heuser, Birgit (Ed.): Systems Modelling and Management - First International Conference, {ICSMM} 2020, Bergen, Norway, June 25-26, 2020, Proceedings, pp. 43-54, Springer, 2020. @inproceedings{BordeleauCEBW20, title = {Towards Model-Driven Digital Twin Engineering: Current Opportunities and Future Challenges}, author = {Francis Bordeleau and Benoit Combemale and Romina Eramo and Mark van den Brand and Manuel Wimmer}, editor = {Önder Babur and Joachim Enil and Birgit Vogel-Heuser}, url = {https://doi.org/10.1007/978-3-030-58167-1_4}, doi = {10.1007/978-3-030-58167-1_4}, year = {2020}, date = {2020-10-22}, booktitle = {Systems Modelling and Management - First International Conference, {ICSMM} 2020, Bergen, Norway, June 25-26, 2020, Proceedings}, volume = {1262}, pages = {43-54}, publisher = {Springer}, abstract = {Digital Twins have emerged since the beginning of this millennium to better support the management of systems based on (real-time) data collected in different parts of the operating systems. Digital Twins have been successfully used in many application domains, and thus, are considered as an important aspect of Model-Based Systems Engineering (MBSE). However, their development , maintenance, and evolution still face major challenges, in particular: (i) the management of heterogeneous models from different disciplines, (ii) the bi-directional synchronization of digital twins and the actual systems, and (iii) the support for collaborative development throughout the complete life-cycle. In the last decades, the Model-Driven Engineering (MDE) community has investigated these challenges in the context of software systems. Now the question arises, which results may be applicable for digital twin engineering as well. 
In this paper, we identify various MDE techniques and technologies which may contribute to tackle the three mentioned digital twin challenges as well as outline a set of open MDE research challenges that need to be addressed in order to move towards a digital twin engineering discipline.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Digital Twins have emerged since the beginning of this millennium to better support the management of systems based on (real-time) data collected in different parts of the operating systems. Digital Twins have been successfully used in many application domains, and thus, are considered as an important aspect of Model-Based Systems Engineering (MBSE). However, their development, maintenance, and evolution still face major challenges, in particular: (i) the management of heterogeneous models from different disciplines, (ii) the bi-directional synchronization of digital twins and the actual systems, and (iii) the support for collaborative development throughout the complete life-cycle. In the last decades, the Model-Driven Engineering (MDE) community has investigated these challenges in the context of software systems. Now the question arises, which results may be applicable for digital twin engineering as well. In this paper, we identify various MDE techniques and technologies which may contribute to tackle the three mentioned digital twin challenges as well as outline a set of open MDE research challenges that need to be addressed in order to move towards a digital twin engineering discipline. |
Lehner, Daniel; Wolny, Sabine; Mazak-Huemer, Alexandra; Wimmer, Manuel Towards a Reference Architecture for Leveraging Model Repositories for Digital Twins Inproceedings 25th (IEEE) International Conference on Emerging Technologies and Factory Automation, ETFA 2020, Vienna, Austria, September 8-11, 2020, pp. 1077-1080, IEEE, 2020. @inproceedings{LehnerWMW20, title = {Towards a Reference Architecture for Leveraging Model Repositories for Digital Twins}, author = {Daniel Lehner and Sabine Wolny and Alexandra Mazak-Huemer and Manuel Wimmer}, url = {https://doi.org/10.1109/ETFA46521.2020.9212109}, doi = {10.1109/ETFA46521.2020.9212109}, year = {2020}, date = {2020-10-15}, booktitle = {25th (IEEE) International Conference on Emerging Technologies and Factory Automation, ETFA 2020, Vienna, Austria, September 8-11, 2020}, pages = {1077-1080}, publisher = {IEEE}, abstract = {In the area of Cyber-Physical Systems (CPS), the degree of complexity continuously increases mainly due to new key-enabling technologies supporting those systems. One way to deal with this increasing complexity is to create a digital representation of such systems, a so-called Digital Twin (DT), which virtually acts in parallel ideally across the entire life-cycle of a CPS. For this purpose, the DT uses simulated or real-time data to mimic operations, control, and may modify the CPS’s behaviour at runtime. However, building such DTs from scratch is not trivial, mainly due to the integration needed to deal with heterogeneous systems residing in different technological spaces. In order to tackle this challenge, Model-Driven Engineering (MDE) allows to logically model a CPS with its physical components. Usually, in MDE such “logical models” are created at design time which keep them detached from the deployed system during runtime. 
Instead of building bilateral solutions between each runtime environment and every engineering tool, a dedicated integration layer is needed which can deal with both, design and runtime aspects. Therefore, we present a reference architecture that allows on the one side to query data from model repositories to enrich the running system with design-time knowledge, and on the other side, to be able to reasoning about system states at runtime in design-time models. We introduce a model repository query and management engine as mediator and show its feasibility by a demonstration case.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } In the area of Cyber-Physical Systems (CPS), the degree of complexity continuously increases mainly due to new key-enabling technologies supporting those systems. One way to deal with this increasing complexity is to create a digital representation of such systems, a so-called Digital Twin (DT), which virtually acts in parallel ideally across the entire life-cycle of a CPS. For this purpose, the DT uses simulated or real-time data to mimic operations, control, and may modify the CPS’s behaviour at runtime. However, building such DTs from scratch is not trivial, mainly due to the integration needed to deal with heterogeneous systems residing in different technological spaces. In order to tackle this challenge, Model-Driven Engineering (MDE) allows to logically model a CPS with its physical components. Usually, in MDE such “logical models” are created at design time which keep them detached from the deployed system during runtime. Instead of building bilateral solutions between each runtime environment and every engineering tool, a dedicated integration layer is needed which can deal with both, design and runtime aspects. 
Therefore, we present a reference architecture that allows on the one side to query data from model repositories to enrich the running system with design-time knowledge, and on the other side, to be able to reason about system states at runtime in design-time models. We introduce a model repository query and management engine as mediator and show its feasibility by a demonstration case. |
Garmendia, Antonio; Wimmer, Manuel; Mazak-Huemer, Alexandra; Guerra, Esther; de Lara, Juan Modelling Production System Families with AutomationML Inproceedings 25th {IEEE} International Conference on Emerging Technologies and Factory Automation, {ETFA} 2020, Vienna, Austria, September 8-11, 2020, pp. 1057-1060, IEEE, 2020. @inproceedings{GarmendiaWMGL20, title = {Modelling Production System Families with AutomationML}, author = {Antonio Garmendia and Manuel Wimmer and Alexandra Mazak-Huemer and Esther Guerra and Juan de Lara}, url = {https://doi.org/10.1109/ETFA46521.2020.9211894}, doi = {10.1109/ETFA46521.2020.9211894}, year = {2020}, date = {2020-10-15}, booktitle = {25th {IEEE} International Conference on Emerging Technologies and Factory Automation, {ETFA} 2020, Vienna, Austria, September 8-11, 2020}, pages = {1057-1060}, publisher = {IEEE}, abstract = {The description of families of production systems usually relies on the use of variability modelling. This aspect of modelling is gaining increasing interest with the emergence of Industry 4.0 to facilitate the product development as new requirements appear. As a consequence, there are several emerging modelling techniques able to apply variability in different domains. In this paper, we introduce an approach to establish product system families in AutomationML. Our approach is based on the definition of feature models describing the variability space, and on the assignment of presence conditions to AutomationML model elements. These conditions (de-)select the model elements depending on the chosen configuration. This way, it is possible to model a large set of model variants in a compact way using one single model. To realize our approach, we started from an existing EMF-based AutomationML workbench providing graphical modelling support. From these artifacts,we synthesized an extended graphical modelling editor with variability support, integrated with FeatureIDE. 
Furthermore, we validated our approach by creating and managing a production system family encompassing six scenarios of the Pick and Place Unit Industry 4.0 demonstrator.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The description of families of production systems usually relies on the use of variability modelling. This aspect of modelling is gaining increasing interest with the emergence of Industry 4.0 to facilitate the product development as new requirements appear. As a consequence, there are several emerging modelling techniques able to apply variability in different domains. In this paper, we introduce an approach to establish product system families in AutomationML. Our approach is based on the definition of feature models describing the variability space, and on the assignment of presence conditions to AutomationML model elements. These conditions (de-)select the model elements depending on the chosen configuration. This way, it is possible to model a large set of model variants in a compact way using one single model. To realize our approach, we started from an existing EMF-based AutomationML workbench providing graphical modelling support. From these artifacts, we synthesized an extended graphical modelling editor with variability support, integrated with FeatureIDE. Furthermore, we validated our approach by creating and managing a production system family encompassing six scenarios of the Pick and Place Unit Industry 4.0 demonstrator. |
Lang, Laurens; Wally, Bernhard; Huemer, Christian; Sindelar, Radek; Mazak-Huemer, Alexandra; Wimmer, Manuel A Graphical Toolkit for IEC 62264-2 Inproceedings Gao, Robert X; Ehmann, Kornel (Ed.): 53rd CIRP Conference on Manufacturing Systems 2020, July 1-3, 2020, Chicago, USA, pp. 532-537, 2020. @inproceedings{wimmer2020e, title = {A Graphical Toolkit for IEC 62264-2}, author = {Laurens Lang and Bernhard Wally and Christian Huemer and Radek Sindelar and Alexandra Mazak-Huemer and Manuel Wimmer}, editor = {Robert X. Gao and Kornel Ehmann}, url = {https://doi.org/10.1016/j.procir.2020.03.049}, doi = {10.1016/j.procir.2020.03.049}, year = {2020}, date = {2020-09-22}, booktitle = {53rd CIRP Conference on Manufacturing Systems 2020, July 1-3, 2020, Chicago, USA}, journal = {Procedia CIRP}, volume = {93}, pages = {532-537}, abstract = {Among the plethora of industrial standards available in the context of smart manufacturing, one series of standards is consistently being mentioned for dealing with manufacturing operations management: IEC 62264. Its second part provides a conceptual model for the description of production systems and their capabilities, including runtime information such as concrete maintenance schedules or achieved production goals. In this work, we present a concrete graphical syntax and toolkit for the creation and presentation of IEC 62264-2 compliant models, using techniques from model-driven (software) engineering. We have evaluated our tool by conducting a user study for assessing its usability and effectiveness.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Among the plethora of industrial standards available in the context of smart manufacturing, one series of standards is consistently being mentioned for dealing with manufacturing operations management: IEC 62264. 
Its second part provides a conceptual model for the description of production systems and their capabilities, including runtime information such as concrete maintenance schedules or achieved production goals. In this work, we present a concrete graphical syntax and toolkit for the creation and presentation of IEC 62264-2 compliant models, using techniques from model-driven (software) engineering. We have evaluated our tool by conducting a user study for assessing its usability and effectiveness. |
Sanctis, Martina De; Iovino, Ludovico; Rossi, Maria Teresa; Wimmer, Manuel A Flexible Architecture for Key Performance Indicators Assessment in Smart Cities Inproceedings Jansen, Anton; Malavolta, Ivano; Muccini, Henry; Zimmermann, Olaf (Ed.): Software Architecture for Key Performance Indicators Assessment in Smart Cities, pp. 118-135, Springer, 2020, ISBN: 978-3-030-58922-6. @inproceedings{SanctisIRW20, title = {A Flexible Architecture for Key Performance Indicators Assessment in Smart Cities}, author = {Martina De Sanctis and Ludovico Iovino and Maria Teresa Rossi and Manuel Wimmer}, editor = {Anton Jansen and Ivano Malavolta and Henry Muccini and Olaf Zimmermann}, url = {https://doi.org/10.1007/978-3-030-58923-3_8}, doi = {10.1007/978-3-030-58923-3_8}, isbn = {978-3-030-58922-6}, year = {2020}, date = {2020-09-19}, booktitle = {Software Architecture for Key Performance Indicators Assessment in Smart Cities}, volume = {12292}, pages = {118-135}, publisher = {Springer}, abstract = {The concept of smart and sustainable city has been on the agenda for the last decade. Smart governance is about the use of innovation for supporting enhanced decision making and planning to make a city smart, by leveraging on Key Performance Indicators (KPIs) as procedural tools. However, developing processes and instruments able to evaluate smart cities is still a challenging task, due to the rigidity showed by the existing frameworks in the definition of KPIs and modeling of the subjects to be evaluated. Web-based platforms, spreadsheets or even Cloud-based applications offer limited flexibility, if the stakeholder is interested not only in using but also in defining the pieces of the puzzle to be composed. In this paper we present a flexible architecture supporting a model-driven approach for the KPIs assessment in smart cities. 
It identifies both required and optional components and functionalities needed for realizing the automatic KPIs assessment, while showing flexibility points allowing for different specification of the architecture, thus of the overall methodology.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The concept of smart and sustainable city has been on the agenda for the last decade. Smart governance is about the use of innovation for supporting enhanced decision making and planning to make a city smart, by leveraging on Key Performance Indicators (KPIs) as procedural tools. However, developing processes and instruments able to evaluate smart cities is still a challenging task, due to the rigidity showed by the existing frameworks in the definition of KPIs and modeling of the subjects to be evaluated. Web-based platforms, spreadsheets or even Cloud-based applications offer limited flexibility, if the stakeholder is interested not only in using but also in defining the pieces of the puzzle to be composed. In this paper we present a flexible architecture supporting a model-driven approach for the KPIs assessment in smart cities. It identifies both required and optional components and functionalities needed for realizing the automatic KPIs assessment, while showing flexibility points allowing for different specification of the architecture, thus of the overall methodology. |
Iovino, Ludovico; Wimmer, Manuel; Rocco, Juri Di Modeling Smart Cities Inproceedings STAF 2020 Workshop Proceedings: 4th Workshop on Model-Driven Engineering for the Internet-of-Things, co-located with Software Technologies: Applications and Foundations federation of conferences (STAF 2020) Bergen, Norway, June 22-26, 2020., pp. 3, 2020. @inproceedings{wimmer2020g, title = {Modeling Smart Cities}, author = {Ludovico Iovino and Manuel Wimmer and Juri Di Rocco}, url = {http://ceur-ws.org/Vol-2707/moscpreface.pdf}, year = {2020}, date = {2020-07-01}, booktitle = {STAF 2020 Workshop Proceedings: 4th Workshop on Model-Driven Engineering for the Internet-of-Things, co-located with Software Technologies: Applications and Foundations federation of conferences (STAF 2020) Bergen, Norway, June 22-26, 2020.}, volume = {2707}, pages = {3}, abstract = {Making a city ”smart” is an emerging strategy to mitigate the problems generated by the urban population growth and rapid urbanization. Each city models the technological, organizational and policy aspects of that city, and for this reason a smart city is an interplay among technological, organizational and policy innovation. The complexity of inter actions not only in hardware and software involved, but also in the actors and processes interplay, makes the availability of systematic design processes a must.Model Driven Engineering (MDE) improves coordination between the various stakeholders, resulting in the qualitative production of software and other artifacts involved. MDE has been successfully used in businesses with a need for complex and error-proof software, such as companies operating in the high tech industry. Integrating MDE approaches in Smart City design processes may lead to more robust solutions.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Making a city ”smart” is an emerging strategy to mitigate the problems generated by the urban population growth and rapid urbanization. 
Each city models the technological, organizational and policy aspects of that city, and for this reason a smart city is an interplay among technological, organizational and policy innovation. The complexity of interactions not only in hardware and software involved, but also in the actors and processes interplay, makes the availability of systematic design processes a must. Model Driven Engineering (MDE) improves coordination between the various stakeholders, resulting in the qualitative production of software and other artifacts involved. MDE has been successfully used in businesses with a need for complex and error-proof software, such as companies operating in the high tech industry. Integrating MDE approaches in Smart City design processes may lead to more robust solutions. |
Ciccozzi, Federico; Ferry, Nikolas; Mosser, Sebastian; Solberg, Amor; Wimmer, Manuel Model-Driven Engineering for the Internet-of-Things Inproceedings STAF 2020 Workshop Proceedings: 4th Workshop on Model-Driven Engineering for the Internet-of-Things, co-located with Software Technologies: Applications and Foundations federation of conferences (STAF 2020) Bergen, Norway, June 22-26, 2020., pp. 3, 2020. @inproceedings{wimmer2020f, title = {Model-Driven Engineering for the Internet-of-Things}, author = {Federico Ciccozzi and Nikolas Ferry and Sebastian Mosser and Amor Solberg and Manuel Wimmer }, url = {http://ceur-ws.org/Vol-2707/mde4iotpreface.pdf}, year = {2020}, date = {2020-06-30}, booktitle = {STAF 2020 Workshop Proceedings: 4th Workshop on Model-Driven Engineering for the Internet-of-Things, co-located with Software Technologies: Applications and Foundations federation of conferences (STAF 2020) Bergen, Norway, June 22-26, 2020.}, volume = {2707}, pages = {3}, abstract = {A recent forecast from the International Data Corporation (IDC) envi-sions that 41 billion Internet-of-Things (IoT) endpoints will be in use by20251, representing great business opportunities. The next generation IoTsystems needs to perform distributed processing and coordinated behavioracross IoT, edge and cloud infrastructures, manage the closed loop fromsensing to actuation, and cope with vast heterogeneity, scalability and dy-namicity of IoT systems and their environments.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } A recent forecast from the International Data Corporation (IDC) envi-sions that 41 billion Internet-of-Things (IoT) endpoints will be in use by20251, representing great business opportunities. 
The next generation IoT systems need to perform distributed processing and coordinated behavior across IoT, edge and cloud infrastructures, manage the closed loop from sensing to actuation, and cope with vast heterogeneity, scalability and dynamicity of IoT systems and their environments. |
Franch, Xavier; Seyff, Norbert; Oriol, Marc; Fricker, Samuel; Groher, Iris; Vierhauser, Michael; Wimmer, Manuel Towards Integrating Data-Driven Requirements Engineering into the Software Development Process: A Vision Paper Inproceedings Madhavji, Nazim H; Pasquale, Liliana; Ferrari, Alessio; Gnesi, Stefania (Ed.): 26th International Working Conference on Requirements Engineering: Foundation for Software Quality, REFSQ 2020, Pisa, Italy, March 24-27, 2020, was postponed, pp. 135-142, Springer, 2020. @inproceedings{FranchSOFGVW20, title = {Towards Integrating Data-Driven Requirements Engineering into the Software Development Process: A Vision Paper}, author = {Xavier Franch and Norbert Seyff and Marc Oriol and Samuel Fricker and Iris Groher and Michael Vierhauser and Manuel Wimmer}, editor = {Nazim H. Madhavji and Liliana Pasquale and Alessio Ferrari and Stefania Gnesi}, url = {https://doi.org/10.1007/978-3-030-44429-7_10}, doi = {10.1007/978-3-030-44429-7_10}, year = {2020}, date = {2020-06-10}, booktitle = {26th International Working Conference on Requirements Engineering: Foundation for Software Quality, REFSQ 2020, Pisa, Italy, March 24-27, 2020, was postponed}, volume = {12045}, pages = {135-142}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {[Context and motivation] Modern software engineering processes have shifted from traditional upfront requirements engineering (RE) to a more continuous way of conducting RE, particularly including data-driven approaches. [Question/problem] However, current research on data-driven RE focuses more on leveraging certain techniques such as natural language processing or machine learning than on making the concept fit for facilitating its use in the entire software development process. [Principal ideas/results] In this paper, we propose a research agenda composed of six distinct research directions. 
These include a data-driven RE infrastructure, embracing data heterogeneity, context-aware adaptation, data analysis and decision support, privacy and confidentiality, and finally process integration. Each of these directions addresses challenges that impede the broader use of data-driven RE. [Contribution] For researchers, our research agenda provides topics relevant to investigate. For practitioners, overcoming the underlying challenges with the help of the proposed research will allow to adopt a data-driven RE approach and facilitate its seamless integration into modern software engineering. For users, the proposed research will enable the transparency, control, and security needed to trust software systems and software providers.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } [Context and motivation] Modern software engineering processes have shifted from traditional upfront requirements engineering (RE) to a more continuous way of conducting RE, particularly including data-driven approaches. [Question/problem] However, current research on data-driven RE focuses more on leveraging certain techniques such as natural language processing or machine learning than on making the concept fit for facilitating its use in the entire software development process. [Principal ideas/results] In this paper, we propose a research agenda composed of six distinct research directions. These include a data-driven RE infrastructure, embracing data heterogeneity, context-aware adaptation, data analysis and decision support, privacy and confidentiality, and finally process integration. Each of these directions addresses challenges that impede the broader use of data-driven RE. [Contribution] For researchers, our research agenda provides topics relevant to investigate. 
For practitioners, overcoming the underlying challenges with the help of the proposed research will allow to adopt a data-driven RE approach and facilitate its seamless integration into modern software engineering. For users, the proposed research will enable the transparency, control, and security needed to trust software systems and software providers. |
John, Stefan; Burdusel, Alexandru; Bill, Robert; Strüber, Daniel; Taentzer, Gabriele; Zschaler, Steffen; Wimmer, Manuel Searching for Optimal Models: Comparing Two Encoding Approaches Inproceedings Felderer, Michael; Hasselbring, Wilhelm; Rabiser, Rick; Jung, Reiner (Ed.): Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria , pp. 101-103, Gesellschaft für Informatik e.V., 2020. @inproceedings{0001BB0TZW20, title = {Searching for Optimal Models: Comparing Two Encoding Approaches}, author = {Stefan John and Alexandru Burdusel and Robert Bill and Daniel Strüber and Gabriele Taentzer and Steffen Zschaler and Manuel Wimmer}, editor = {Michael Felderer and Wilhelm Hasselbring and Rick Rabiser and Reiner Jung}, url = {https://doi.org/10.18420/SE2020_30}, doi = {10.18420/SE2020_30}, year = {2020}, date = {2020-02-06}, booktitle = {Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria }, volume = {P-300}, pages = {101-103}, publisher = {Gesellschaft für Informatik e.V.}, series = {LNI}, abstract = {Search-Based Software Engineering (SBSE) is about solving software development problems by formulating them as optimisation problems. In the last years, combining SBSE and Model-Driven Engineering (MDE), where models and model transformations are treated as key artifacts in the development of complex systems, has become increasingly popular. While search-based techniques have often successfully been applied to tackle MDE problems, a recent line of research investigates how a model-driven design can make optimisation more easily accessible to a wider audience. In previous model-driven optimisation efforts, a major design decision concerns the way in which solutions are encoded. 
Two main options have been explored: a model-based encoding representing candidate solutions as models, and a rule-based encoding representing them as sequences of transformation rule applications. While both encodings have been applied to different use cases, no study has yet compared them systematically. To close this gap, we evaluate both approaches on a common set of optimization problems, investigating their impact on the optimization performance. Additionally, we discuss their differences, strengths, and weaknesses laying the foundation for a knowledgeable choice of the right encoding for the right problem.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Search-Based Software Engineering (SBSE) is about solving software development problems by formulating them as optimisation problems. In the last years, combining SBSE and Model-Driven Engineering (MDE), where models and model transformations are treated as key artifacts in the development of complex systems, has become increasingly popular. While search-based techniques have often successfully been applied to tackle MDE problems, a recent line of research investigates how a model-driven design can make optimisation more easily accessible to a wider audience. In previous model-driven optimisation efforts, a major design decision concerns the way in which solutions are encoded. Two main options have been explored: a model-based encoding representing candidate solutions as models, and a rule-based encoding representing them as sequences of transformation rule applications. While both encodings have been applied to different use cases, no study has yet compared them systematically. To close this gap, we evaluate both approaches on a common set of optimization problems, investigating their impact on the optimization performance. Additionally, we discuss their differences, strengths, and weaknesses laying the foundation for a knowledgeable choice of the right encoding for the right problem. |
Feldmann, Stefan; Kernschmidt, Konstantin; Wimmer, Manuel; Vogel-Heuser, Birgit Managing Inter-Model Inconsistencies in Model-based Systems Engineering Inproceedings Felderer, Michael; Hasselbring, Wilhelm; Rabiser, Rick; Jung, Reiner (Ed.): Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria, pp. 99-100, Gesellschaft für Informatik e.V., 2020. @inproceedings{FeldmannKMV20, title = {Managing Inter-Model Inconsistencies in Model-based Systems Engineering}, author = {Stefan Feldmann and Konstantin Kernschmidt and Manuel Wimmer and Birgit Vogel-Heuser}, editor = {Michael Felderer and Wilhelm Hasselbring and Rick Rabiser and Reiner Jung}, url = {https://dl.gi.de/handle/20.500.12116/31706}, doi = {10.18420/SE2020_29}, year = {2020}, date = {2020-02-06}, booktitle = {Software Engineering 2020, Fachtagung des GI-Fachbereichs Softwaretechnik, 24.-28. Februar 2020, Innsbruck, Austria}, volume = {P-300}, pages = {99-100}, publisher = {Gesellschaft für Informatik e.V.}, series = {LNI}, abstract = {This work summarizes our paper [Fe19] originally published in the Journal of Systems and Software in 2019 about a model-based inconsistency management approach.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } This work summarizes our paper [Fe19] originally published in the Journal of Systems and Software in 2019 about a model-based inconsistency management approach. |
Combemale, Benoît; Wimmer, Manuel Towards a Model-Based DevOps for Cyber-Physical Systems Inproceedings Bruel, Jean-Michel; Mazzara, Manuel; Meyer, Bertrand (Ed.): Software Engineering Aspects of Continuous Development and New Paradigms of Software Production and Deployment - Second International Workshop, DEVOPS 2019, Château de Villebrumier, France, May 6-8, 2019, Revised Selected Papers, pp. 84-94, Springer, 2020. @inproceedings{CombemaleW19, title = {Towards a Model-Based DevOps for Cyber-Physical Systems}, author = {Benoît Combemale and Manuel Wimmer}, editor = {Jean-Michel Bruel and Manuel Mazzara and Bertrand Meyer}, url = {https://doi.org/10.1007/978-3-030-39306-9_6}, doi = {10.1007/978-3-030-39306-9_6}, year = {2020}, date = {2020-01-21}, booktitle = {Software Engineering Aspects of Continuous Development and New Paradigms of Software Production and Deployment - Second International Workshop, DEVOPS 2019, Château de Villebrumier, France, May 6-8, 2019, Revised Selected Papers}, volume = {12055}, pages = {84-94}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {The emerging field of Cyber-Physical Systems (CPS) calls for new scenarios of the use of models. In particular, CPS require to support both the integration of physical and cyber parts in innovative complex systems or production chains, together with the management of the data gathered from the environment to drive dynamic reconfiguration at runtime or finding improved designs. In such a context, the engineering of CPS must rely on models to uniformly reason about various heterogeneous concerns all along the system life cycle. In the last decades, the use of models has been intensively investigated both at design time for driving the development of complex systems, and at runtime as a reasoning layer to support deployment, monitoring and runtime adaptations. However, the approaches remain mostly independent. 
With the advent of DevOps principles, the engineering of CPS would benefit from supporting a smooth continuum of models from design to runtime, and vice versa. In this vision paper, we introduce a vision for supporting model-based DevOps practices, and we infer the corresponding research roadmap for the modeling community to address this vision by discussing a CPS demonstrator. }, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The emerging field of Cyber-Physical Systems (CPS) calls for new scenarios of the use of models. In particular, CPS require to support both the integration of physical and cyber parts in innovative complex systems or production chains, together with the management of the data gathered from the environment to drive dynamic reconfiguration at runtime or finding improved designs. In such a context, the engineering of CPS must rely on models to uniformly reason about various heterogeneous concerns all along the system life cycle. In the last decades, the use of models has been intensively investigated both at design time for driving the development of complex systems, and at runtime as a reasoning layer to support deployment, monitoring and runtime adaptations. However, the approaches remain mostly independent. With the advent of DevOps principles, the engineering of CPS would benefit from supporting a smooth continuum of models from design to runtime, and vice versa. In this vision paper, we introduce a vision for supporting model-based DevOps practices, and we infer the corresponding research roadmap for the modeling community to address this vision by discussing a CPS demonstrator. |
2019 |
Neubauer, Patrick; Bill, Robert; Kolovos, Dimitris S; Paige, Richard F; Wimmer, Manuel Reusable Textual Notations for Domain-Specific Languages Inproceedings Brucker, Achim D; Daniel, Gwendal; Jouault, Frédéric (Ed.): 19th International Workshop in OCL and Textual Modeling (OCL 2019) co-located with IEEE/ACM 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS 2019), Munich, Germany, September 16, 2019, pp. 67-80, CEUR-WS.org, 2019. @inproceedings{NeubauerBKPW19, title = {Reusable Textual Notations for Domain-Specific Languages}, author = {Patrick Neubauer and Robert Bill and Dimitris S. Kolovos and Richard F. Paige and Manuel Wimmer}, editor = {Achim D. Brucker and Gwendal Daniel and Frédéric Jouault}, url = {http://ceur-ws.org/Vol-2513/paper6.pdf}, year = {2019}, date = {2019-12-31}, booktitle = {19th International Workshop in OCL and Textual Modeling (OCL 2019) co-located with IEEE/ACM 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS 2019), Munich, Germany, September 16, 2019}, volume = {2513}, pages = {67-80}, publisher = {CEUR-WS.org}, series = {CEUR Workshop Proceedings}, abstract = {Domain-specific languages enable concise and precise formalization of domain concepts and promote direct employment by domain experts. Therefore, syntactic constructs are introduced to empower users to associate concepts and relationships with visual textual symbols. Model-based language engineering facilitates the description of concepts and relationships in an abstract manner. However, concrete representations are commonly attached to abstract domain representations, such as annotations in metamodels, or directly encoded into language grammar and thus introduce redundancy between metamodel elements and grammar elements. 
In this work, we propose an approach that enables autonomous development and maintenance of domain concepts and textual language notations in a distinctive and metamodel-agnostic manner by employing style models containing grammar rule templates and injection-based property selection. We provide an implementation and showcase the proposed notation-specification language in a comparison with state of the art practices during the creation of notations for an executable domain-specific modeling language based on the Eclipse Modeling Framework and Xtext.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Domain-specific languages enable concise and precise formalization of domain concepts and promote direct employment by domain experts. Therefore, syntactic constructs are introduced to empower users to associate concepts and relationships with visual textual symbols. Model-based language engineering facilitates the description of concepts and relationships in an abstract manner. However, concrete representations are commonly attached to abstract domain representations, such as annotations in metamodels, or directly encoded into language grammar and thus introduce redundancy between metamodel elements and grammar elements. In this work, we propose an approach that enables autonomous development and maintenance of domain concepts and textual language notations in a distinctive and metamodel-agnostic manner by employing style models containing grammar rule templates and injection-based property selection. We provide an implementation and showcase the proposed notation-specification language in a comparison with state of the art practices during the creation of notations for an executable domain-specific modeling language based on the Eclipse Modeling Framework and Xtext. |
Wolny, Sabine; Mazak, Alexandra; Wimmer, Manuel; Huemer, Christian Model-driven Runtime State Identification Inproceedings Proceedings of the Conference on Digital Ecosystems of the Future: Methods, Techniques and Applications (EMISA) - EMISA Forum, pp. 29-44, 2019. @inproceedings{Wolny2019mdrsi, title = {Model-driven Runtime State Identification}, author = {Sabine Wolny and Alexandra Mazak and Manuel Wimmer and Christian Huemer}, url = {https://cdl-mint.se.jku.at/wp-content/uploads/2020/04/EMISA_2019.pdf https://cdl-mint.se.jku.at/case-study-artefacts-for-emisa-2019/ }, year = {2019}, date = {2019-12-20}, booktitle = {Proceedings of the Conference on Digital Ecosystems of the Future: Methods, Techniques and Applications (EMISA) - EMISA Forum}, volume = {39}, number = {1}, pages = {29-44}, abstract = {With new advances such as Cyber-Physical Systems (CPS) and Internet of Things (IoT), more and more discrete software systems interact with continuous physical systems. State machines are a classical approach to specify the intended behavior of discrete systems during development. However, the actual realized behavior may deviate from those specified models due to environmental impacts, or measurement inaccuracies. Accordingly, data gathered at runtime should be validated against the specified model. A first step in this direction is to identify the individual system states of each execution of a system at runtime. This is a particular challenge for continuous systems where system states may be only identified by listening to sensor value streams. A further challenge is to raise these raw value streams on a model level for checking purposes. To tackle these challenges, we introduce a model-driven runtime state identification approach. In particular, we automatically derive corresponding time-series database queries from state machines in order to identify system runtime states based on the sensor value streams of running systems. 
We demonstrate our approach for a subset of SysML and evaluate it based on a case study of a simulated environment of a five-axes grip-arm robot within a working station.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } With new advances such as Cyber-Physical Systems (CPS) and Internet of Things (IoT), more and more discrete software systems interact with continuous physical systems. State machines are a classical approach to specify the intended behavior of discrete systems during development. However, the actual realized behavior may deviate from those specified models due to environmental impacts, or measurement inaccuracies. Accordingly, data gathered at runtime should be validated against the specified model. A first step in this direction is to identify the individual system states of each execution of a system at runtime. This is a particular challenge for continuous systems where system states may be only identified by listening to sensor value streams. A further challenge is to raise these raw value streams on a model level for checking purposes. To tackle these challenges, we introduce a model-driven runtime state identification approach. In particular, we automatically derive corresponding time-series database queries from state machines in order to identify system runtime states based on the sensor value streams of running systems. We demonstrate our approach for a subset of SysML and evaluate it based on a case study of a simulated environment of a five-axes grip-arm robot within a working station. |
Bousse, Erwan; Wimmer, Manuel Domain-Level Observation and Control for Compiled Executable DSLs Inproceedings Kessentini, Marouane; Yue, Tao; Pretschner, Alexander; Voss, Sebastian; Burgueno, Loli (Ed.): 22nd ACM/IEEE International Conference on Model Driven Engineering Languages and Systems, MODELS 2019, Munich, Germany, September 15-20, 2019, pp. 150-160, IEEE, 2019. @inproceedings{BousseW19, title = {Domain-Level Observation and Control for Compiled Executable DSLs}, author = {Erwan Bousse and Manuel Wimmer}, editor = {Marouane Kessentini and Tao Yue and Alexander Pretschner and Sebastian Voss and Loli Burgueno}, doi = {10.1109/MODELS.2019.000-6}, year = {2019}, date = {2019-12-02}, booktitle = {22nd ACM/IEEE International Conference on Model Driven Engineering Languages and Systems, MODELS 2019, Munich, Germany, September 15-20, 2019}, pages = {150-160}, publisher = {IEEE}, abstract = {Executable Domain-Specific Languages (DSLs) are commonly defined with either operational semantics (i.e., interpretation) or translational semantics (i.e., compilation). An interpreted DSL relies on domain concepts to specify the possible execution states and steps, which enables the observation and control of executions using the very same domain concepts. In contrast, a compiled DSL relies on a transformation to an arbitrarily different target language. This creates a conceptual gap, where the execution can only be observed and controlled through target domain concepts, to the detriment of experts or tools that only understand the source domain. To address this problem, we propose a language engineering architecture for compiled DSLs that enables the observation and control of executions using source domain concepts. The architecture requires the definition of the source domain execution steps and states, along with a feedback manager that translates steps and states of the target domain back to the source domain. 
We evaluate the architecture with two different compiled DSLs, and show that it does enable domain-level observation and control while increasing execution time by 2× in the worst observed case.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Executable Domain-Specific Languages (DSLs) are commonly defined with either operational semantics (i.e., interpretation) or translational semantics (i.e., compilation). An interpreted DSL relies on domain concepts to specify the possible execution states and steps, which enables the observation and control of executions using the very same domain concepts. In contrast, a compiled DSL relies on a transformation to an arbitrarily different target language. This creates a conceptual gap, where the execution can only be observed and controlled through target domain concepts, to the detriment of experts or tools that only understand the source domain. To address this problem, we propose a language engineering architecture for compiled DSLs that enables the observation and control of executions using source domain concepts. The architecture requires the definition of the source domain execution steps and states, along with a feedback manager that translates steps and states of the target domain back to the source domain. We evaluate the architecture with two different compiled DSLs, and show that it does enable domain-level observation and control while increasing execution time by 2× in the worst observed case. |
Wolny, Sabine; Mazak, Alexandra; Wimmer, Manuel Automatic Reverse Engineering of Interaction Models from System Logs Inproceedings Proceedings of the 24th IEEE Conference on Emerging Technologies and Factory Automation (ETFA), Zaragoza, Spain, September 10-13, 2019, pp. 57-64, IEEE, 2019. @inproceedings{Wolny2019reverse, title = {Automatic Reverse Engineering of Interaction Models from System Logs}, author = {Sabine Wolny and Alexandra Mazak and Manuel Wimmer}, url = {https://cdl-mint.se.jku.at/case-study-artefacts-for-etfa-2019/}, doi = {10.1109/ETFA.2019.8869502}, year = {2019}, date = {2019-10-24}, booktitle = {Proceedings of the 24th IEEE Conference on Emerging Technologies and Factory Automation (ETFA), Zaragoza, Spain, September 10-13, 2019}, pages = {57-64}, publisher = {IEEE}, abstract = {Nowadays, software- as well as hardware systems produce log files that enable a continuous monitoring of the system during its execution. Unfortunately, such text-based log traces are very long and difficult to read, and therefore, reasoning and analyzing runtime behavior is not straightforward. However, dealing with log traces is especially needed in cases, where (i) the execution of the system did not perform as intended, (ii) the process flow is unknown because there are no records, and/or (iii) the design models do not correspond to its real-world counterpart. These facts cause that log data has to be prepared in a more user-friendly way (e.g., in form of graphical representations) and it takes that algorithms are needed for automatically monitoring the system’s operation, and for tracking the system components interaction patterns. For this purpose we present an approach for transforming raw sensor data logs to a UML or SysML sequence diagram in order to provide a graphical representation for tracking log traces in a time-ordered manner. 
Based on this sequence diagram, we automatically identify interaction models in order to analyze the runtime behavior of system components. We implement this approach as prototypical plug-in in the modeling tool Enterprise Architect and evaluate it by an example of a self-driving car.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Nowadays, software- as well as hardware systems produce log files that enable a continuous monitoring of the system during its execution. Unfortunately, such text-based log traces are very long and difficult to read, and therefore, reasoning and analyzing runtime behavior is not straightforward. However, dealing with log traces is especially needed in cases, where (i) the execution of the system did not perform as intended, (ii) the process flow is unknown because there are no records, and/or (iii) the design models do not correspond to its real-world counterpart. These facts cause that log data has to be prepared in a more user-friendly way (e.g., in form of graphical representations) and it takes that algorithms are needed for automatically monitoring the system’s operation, and for tracking the system components interaction patterns. For this purpose we present an approach for transforming raw sensor data logs to a UML or SysML sequence diagram in order to provide a graphical representation for tracking log traces in a time-ordered manner. Based on this sequence diagram, we automatically identify interaction models in order to analyze the runtime behavior of system components. We implement this approach as prototypical plug-in in the modeling tool Enterprise Architect and evaluate it by an example of a self-driving car. |
Wally, Bernhard; Lang, Laurens; Wlodarski, Rafal; Sindelar, Radek; Huemer, Christian; Mazak, Alexandra; Wimmer, Manuel Generating Structured AutomationML Models from IEC 62264 Information Inproceedings Proceedings of the 5th AutomationML PlugFest 2019, 2019. @inproceedings{Wally2019gsa, title = {Generating Structured AutomationML Models from IEC 62264 Information}, author = {Bernhard Wally and Laurens Lang and Rafal Wlodarski and Radek Sindelar and Christian Huemer and Alexandra Mazak and Manuel Wimmer}, url = {https://cdl-mint.se.jku.at/generating-structured-automationml/}, year = {2019}, date = {2019-09-26}, booktitle = {Proceedings of the 5th AutomationML PlugFest 2019}, abstract = {AutomationML provides a versatile modeling environment for the description of production systems. However, when starting a new AutomationML project, or when serializing existing data with the AutomationML format, there are no rules on how to structure these models in a meaningful way. In this work, we present an approach for structuring AutomationML models, based on the IEC 62264 standard. In our approach we are implementing the process of serializing IEC 62264 information declaratively, by leveraging the power of model transformations, as they are applied in the context of model-driven (software) engineering.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } AutomationML provides a versatile modeling environment for the description of production systems. However, when starting a new AutomationML project, or when serializing existing data with the AutomationML format, there are no rules on how to structure these models in a meaningful way. In this work, we present an approach for structuring AutomationML models, based on the IEC 62264 standard. 
In our approach we are implementing the process of serializing IEC 62264 information declaratively, by leveraging the power of model transformations, as they are applied in the context of model-driven (software) engineering. |
Wurl, Alexander; Falkner, Andreas; Haselböck, Alois; Mazak, Alexandra; Filzmoser, Peter Exploring Robustness in a Combined Feature Selection Approach Inproceedings Hammoudi, Slimane; Quix, Christoph; Bernardino, Jorge (Ed.): Proceedings of the 8th International Conference on Data Science, Technology and Applications, DATA 2019, Prague, Czech Republic, July 26-28, 2019, pp. 84-91, SciTePress, 2019. @inproceedings{Wurl2019exploring, title = {Exploring Robustness in a Combined Feature Selection Approach}, author = {Alexander Wurl and Andreas Falkner and Alois Haselböck and Alexandra Mazak and Peter Filzmoser}, editor = {Slimane Hammoudi and Christoph Quix and Jorge Bernardino}, doi = {10.5220/0007924400840091}, year = {2019}, date = {2019-09-18}, booktitle = {Proceedings of the 8th International Conference on Data Science, Technology and Applications, DATA 2019, Prague, Czech Republic, July 26-28, 2019}, pages = {84-91}, publisher = {SciTePress}, abstract = {A crucial task in the bidding phase of industrial systems is a precise prediction of the number of hardware components of specific types for the proposal of a future project. Linear regression models, trained on data of past projects, are efficient in supporting such decisions. The number of features used by these regression models should be as small as possible, so that determining their quantities generates minimal effort. The fact that training data are often ambiguous, incomplete, and contain outliers makes challenging demands on the robustness of the feature selection methods used. We present a combined feature selection approach: (i) iteratively learn a robust well-fitted statistical model and rule out irrelevant features, (ii) perform redundancy analysis to rule out dispensable features. In a case study from the domain of hardware management in Rail Automation we show that this approach assures robustness in the calculation of hardware components. 
}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } A crucial task in the bidding phase of industrial systems is a precise prediction of the number of hardware components of specific types for the proposal of a future project. Linear regression models, trained on data of past projects, are efficient in supporting such decisions. The number of features used by these regression models should be as small as possible, so that determining their quantities generates minimal effort. The fact that training data are often ambiguous, incomplete, and contain outliers makes challenging demands on the robustness of the feature selection methods used. We present a combined feature selection approach: (i) iteratively learn a robust well-fitted statistical model and rule out irrelevant features, (ii) perform redundancy analysis to rule out dispensable features. In a case study from the domain of hardware management in Rail Automation we show that this approach assures robustness in the calculation of hardware components. |
Wimmer, Manuel Design Science for Model-Driven Software and Systems Engineering Inproceedings IEEE / ACM 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS), Doctoral Symposium, September 15-20, 2019 Munich, Germany, 2019. @inproceedings{wimmer2019c, title = {Design Science for Model-Driven Software and Systems Engineering}, author = {Manuel Wimmer}, url = {https://modelsconf19.org/?page_id=1933}, year = {2019}, date = {2019-09-17}, booktitle = {IEEE / ACM 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS), Doctoral Symposium, September 15-20, 2019 Munich, Germany}, abstract = {Design Science is well-suited methodology to perform research in Model-Driven Software and Systems Engineering (MDSE). In addition, MDSE may help in performing Design Science with systematic methods to reason about possible designs. In my talk, I will give hints how to combine these two fields in order to have a solid basis for conducting a PhD thesis.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Design Science is well-suited methodology to perform research in Model-Driven Software and Systems Engineering (MDSE). In addition, MDSE may help in performing Design Science with systematic methods to reason about possible designs. In my talk, I will give hints how to combine these two fields in order to have a solid basis for conducting a PhD thesis. |
Wimmer, Manuel Flexible Modeling by Prototype-based Languages and Inconsistency Management: Two Experiences from the Production System Domain Inproceedings FlexMDE 2019 - 5th Flexible MDE Workshop Tuesday, September 17, 2019, Munich, Germany, ACM/IEEE 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS 2019), 2019. @inproceedings{wimmer2019d, title = {Flexible Modeling by Prototype-based Languages and Inconsistency Management: Two Experiences from the Production System Domain}, author = {Manuel Wimmer}, url = {https://docs.google.com/document/d/1CvJeu1sl69g59fZrZ4vYMhMUIFiBl5rPe9D0D7CiyAg/edit# }, year = {2019}, date = {2019-09-17}, booktitle = {FlexMDE 2019 - 5th Flexible MDE Workshop Tuesday, September 17, 2019, Munich, Germany, ACM/IEEE 22nd International Conference on Model Driven Engineering Languages and Systems (MODELS 2019)}, abstract = {Prototype/Clone-based modeling is an alternative to Class/Object-based modeling. While providing a good level of flexibility in the modeling process, prototype-based modeling languages also come with their own challenges. How such languages may provide flexibility and at the same time some degree of consistency is the first part of my talk. In the second part, I will present some ongoing work on dealing with consistency requirements by managing inconsistencies. In both parts, explicitly modeling consistency requirements and how to deal with occurring inconsistencies is the key. Finally, I conclude with an outlook on future challenges for flexible modeling in the model-based systems engineering domain.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Prototype/Clone-based modeling is an alternative to Class/Object-based modeling. While providing a good level of flexibility in the modeling process, prototype-based modeling languages also come with their own challenges. 
How such languages may provide flexibility and at the same time some degree of consistency is the first part of my talk. In the second part, I will present some ongoing work on dealing with consistency requirements by managing inconsistencies. In both parts, explicitly modeling consistency requirements and how to deal with occurring inconsistencies is the key. Finally, I conclude with an outlook on future challenges for flexible modeling in the model-based systems engineering domain. |
Wurl, Alexander; Falkner, Andreas; Haselböck, Alois; Mazak, Alexandra A Conceptual Design of a Digital Companion for Failure Analysis in Rail Automation Inproceedings Becker, Jörg; Novikov, Dmitriy (Ed.): Proceedings of the 21st IEEE Conference on Business Informatics (CBI 2019), Moscow, Russia, July 15-17, 2019, Volume 1 - Research Papers, pp. 578–583, IEEE, 2019. @inproceedings{Wurl2019companion, title = {A Conceptual Design of a Digital Companion for Failure Analysis in Rail Automation}, author = {Alexander Wurl and Andreas Falkner and Alois Haselböck and Alexandra Mazak}, editor = {Jörg Becker and Dmitriy Novikov}, doi = {10.1109/CBI.2019.00073}, year = {2019}, date = {2019-08-27}, booktitle = {Proceedings of the 21st IEEE Conference on Business Informatics (CBI 2019), Moscow, Russia, July 15-17, 2019, Volume 1 - Research Papers}, pages = {578--583}, publisher = {IEEE}, abstract = {In Rail Automation, a crucial task in the maintenance phase comprises the process of failure analysis. Domain experts are often faced with various challenges in analyzing large data volumes which reveal highly complex data structures. However, finding causes for potential failures and deciding how to optimize or repair the system may be extensively time consuming. To this end, we propose the concept of a digital companion which serves as continuous assistant recommending optimizations. A sequence of different data analytics methods within the digital companion enables the domain expert to reasonably manage and control the process of failure analysis. In illustrative examples, we give insights in the workflow of a digital companion and discuss the application in the domain of Rail Automation.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } In Rail Automation, a crucial task in the maintenance phase comprises the process of failure analysis. 
Domain experts are often faced with various challenges in analyzing large data volumes which reveal highly complex data structures. However, finding causes for potential failures and deciding how to optimize or repair the system may be extensively time consuming. To this end, we propose the concept of a digital companion which serves as continuous assistant recommending optimizations. A sequence of different data analytics methods within the digital companion enables the domain expert to reasonably manage and control the process of failure analysis. In illustrative examples, we give insights in the workflow of a digital companion and discuss the application in the domain of Rail Automation. |
Wally, Bernhard; Vyskocil, Jiri; Novak, Petr; Huemer, Christian; Sindelar, Radek; Kadera, Petr; Mazak, Alexandra; Wimmer, Manuel Production Planning with IEC 62264 and PDDL Inproceedings Proceedings of the 17th IEEE International Conference on Industrial Informatics (INDIN 2019), 2019. @inproceedings{Wally2019durative, title = {Production Planning with IEC 62264 and PDDL}, author = {Bernhard Wally and Jiri Vyskocil and Petr Novak and Christian Huemer and Radek Sindelar and Petr Kadera and Alexandra Mazak and Manuel Wimmer}, year = {2019}, date = {2019-07-24}, booktitle = {Proceedings of the 17th IEEE International Conference on Industrial Informatics (INDIN 2019)}, abstract = {Smart production systems need to be able to adapt to changing environments and market needs. They have to reflect changes in (i) the reconfiguration of the production systems themselves, (ii) the processes they perform or (iii) the products they produce. Manual intervention for system adaptation is costly and potentially error-prone. In this article, we propose a model-driven approach for the automatic generation and regeneration of production plans that can be triggered anytime a change in any of the three aforementioned parameters occurs.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Smart production systems need to be able to adapt to changing environments and market needs. They have to reflect changes in (i) the reconfiguration of the production systems themselves, (ii) the processes they perform or (iii) the products they produce. Manual intervention for system adaptation is costly and potentially error-prone. In this article, we propose a model-driven approach for the automatic generation and regeneration of production plans that can be triggered anytime a change in any of the three aforementioned parameters occurs. |
2018 |
Wally, Bernhard; Huemer, Christian; Mazak, Alexandra; Wimmer, Manuel IEC 62264-2 for AutomationML Inproceedings Proceedings of the 5th AutomationML User Conference, 2018. @inproceedings{Wally2018IECAML, title = {IEC 62264-2 for AutomationML}, author = {Bernhard Wally and Christian Huemer and Alexandra Mazak and Manuel Wimmer}, year = {2018}, date = {2018-10-25}, booktitle = {Proceedings of the 5th AutomationML User Conference}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
Gómez, A; Cabot, Jordi; Wimmer, Manuel TemporalEMF: A Temporal Metamodeling Framework Inproceedings Proceedings of the 37th International Conference on Conceptual Modeling (ER 2018), 2018. @inproceedings{Gomez2018temporalEMF, title = {TemporalEMF: A Temporal Metamodeling Framework}, author = {A. Gómez and Jordi Cabot and Manuel Wimmer}, year = {2018}, date = {2018-10-25}, booktitle = {Proceedings of the 37th International Conference on Conceptual Modeling (ER 2018)}, abstract = {Existing modeling tools provide direct access to the most current version of a model but very limited support to inspect the model state in the past. This typically requires looking for a model version (usually stored in some kind of external versioning system like Git) roughly corresponding to the desired period and using it to manually retrieve the required data. This approximate answer is not enough in scenarios that require a more precise and immediate response to temporal queries like complex collaborative co-engineering processes or runtime models. In this paper, we reuse well-known concepts from temporal languages to propose a temporal metamodeling framework, called TemporalEMF, that adds native temporal support for models. In our framework, models are automatically treated as temporal models and can be subjected to temporal queries to retrieve the model contents at different points in time. We have built our framework on top of the Eclipse Modeling Framework (EMF). Behind the scenes, the history of a model is transparently stored in a NoSQL database. We evaluate the resulting TemporalEMF framework with an Industry 4.0 case study about a production system simulator. 
The results show good scalability for storing and accessing temporal models without requiring changes to the syntax and semantics of the simulator.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Existing modeling tools provide direct access to the most current version of a model but very limited support to inspect the model state in the past. This typically requires looking for a model version (usually stored in some kind of external versioning system like Git) roughly corresponding to the desired period and using it to manually retrieve the required data. This approximate answer is not enough in scenarios that require a more precise and immediate response to temporal queries like complex collaborative co-engineering processes or runtime models. In this paper, we reuse well-known concepts from temporal languages to propose a temporal metamodeling framework, called TemporalEMF, that adds native temporal support for models. In our framework, models are automatically treated as temporal models and can be subjected to temporal queries to retrieve the model contents at different points in time. We have built our framework on top of the Eclipse Modeling Framework (EMF). Behind the scenes, the history of a model is transparently stored in a NoSQL database. We evaluate the resulting TemporalEMF framework with an Industry 4.0 case study about a production system simulator. The results show good scalability for storing and accessing temporal models without requiring changes to the syntax and semantics of the simulator. |
Wolny, Sabine; Mazak, Alexandra; Wally, Bernhard An Initial Mapping Study on MDE4IoT Inproceedings Proceedings of the 2nd International Workshop on Model-Driven Engineering for the Internet-of-Things (MDE4IoT 2018), 2018. @inproceedings{Wolny2018mde4iot, title = {An Initial Mapping Study on MDE4IoT}, author = {Sabine Wolny and Alexandra Mazak and Bernhard Wally}, year = {2018}, date = {2018-10-24}, booktitle = {Proceedings of the 2nd International Workshop on Model-Driven Engineering for the Internet-of-Things (MDE4IoT 2018)}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
Rodriguez-Echeverria, R; Izquierdo, Canovas J; Cabot, Jordi; Wimmer, Manuel Towards a Language Server Protocol Infrastructure for Graphical Modeling Inproceedings Proceedings of the ACM/IEEE 21st International Conference on Model Driven Engineering Languages and Systems (MODELS 2018), 2018. @inproceedings{Echeverria2018lsp, title = {Towards a Language Server Protocol Infrastructure for Graphical Modeling}, author = {R. Rodriguez-Echeverria and J. Canovas Izquierdo and Jordi Cabot and Manuel Wimmer}, year = {2018}, date = {2018-10-19}, booktitle = {Proceedings of the ACM/IEEE 21st International Conference on Model Driven Engineering Languages and Systems (MODELS 2018)}, abstract = {In Model-Driven Engineering (MDE), models are often expressed following a graphical representation of their concepts and associations. MDE tooling allows developers to create models according to their graphical syntax and subsequently, generate code or other kind of models from them. However, the development of full-fledge graphical modeling tools is a challenging and complex task [18]. These tools usually address specific languages and platforms, as supporting multiple ones is not a viable option given the implementation and integration costs. Although the advantages of following the path defined by Language Server Protocol (LSP) are clear for IDE development aimed at graphical languages, currently the question about how to do it properly remains open as LSP has been defined without considering graphical languages. Basically, there is no scientific assessment or tool provider position on whether LSP provides enough expressiveness for graphical manipulations, whether it should be extended to support specific features of graphical edition or whether it would be best to ignore LSP in graphical modeling. 
Furthermore, LSP definition is still an ongoing work, thus it could be the right moment to suggest reasonable adaptations or extensions to provide support for graphical languages.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } In Model-Driven Engineering (MDE), models are often expressed following a graphical representation of their concepts and associations. MDE tooling allows developers to create models according to their graphical syntax and subsequently, generate code or other kind of models from them. However, the development of full-fledge graphical modeling tools is a challenging and complex task [18]. These tools usually address specific languages and platforms, as supporting multiple ones is not a viable option given the implementation and integration costs. Although the advantages of following the path defined by Language Server Protocol (LSP) are clear for IDE development aimed at graphical languages, currently the question about how to do it properly remains open as LSP has been defined without considering graphical languages. Basically, there is no scientific assessment or tool provider position on whether LSP provides enough expressiveness for graphical manipulations, whether it should be extended to support specific features of graphical edition or whether it would be best to ignore LSP in graphical modeling. Furthermore, LSP definition is still an ongoing work, thus it could be the right moment to suggest reasonable adaptations or extensions to provide support for graphical languages. |
Kessentini, Wael; Wimmer, Manuel; Sahraoui, Houari Integrating the Designer in-the-loop for Metamodel/Model Co-Evolution via Interactive Computational Search Inproceedings Proceedings of the ACM/IEEE 21st International Conference on Model Driven Engineering Languages and Systems (MODELS), 2018. @inproceedings{Kessentini2018ditl, title = {Integrating the Designer in-the-loop for Metamodel/Model Co-Evolution via Interactive Computational Search}, author = {Wael Kessentini and Manuel Wimmer and Houari Sahraoui}, year = {2018}, date = {2018-10-19}, booktitle = {Proceedings of the ACM/IEEE 21st International Conference on Model Driven Engineering Languages and Systems (MODELS)}, abstract = {Metamodels evolve even more frequently than programming languages. This evolution process may result in a large number of instance models that are no longer conforming to the revised meta-model. On the one hand, the manual adaptation of models after the metamodels’ evolution can be tedious, error-prone, and time-consuming. On the other hand, the automated co-evolution of metamodels/models is challenging especially when new semantics is introduced to the metamodels. In this paper, we propose an interactive multi-objective approach that dynamically adapts and interactively suggests edit operations to developers and takes their feedback into consideration. Our approach uses NSGA-II to find a set of good edit operation sequences that minimizes the number of conformance errors, maximizes the similarity with the initial model (reduce the loss of information) and minimizes the number of proposed edit operations. The designer can approve, modify, or reject each of the recommended edit operations, and this feedback is then used to update the proposed rankings of recommended edit operations. 
We evaluated our approach on a set of metamodel/model coevolution case studies and compared it to fully automated coevolution techniques.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Metamodels evolve even more frequently than programming languages. This evolution process may result in a large number of instance models that are no longer conforming to the revised meta-model. On the one hand, the manual adaptation of models after the metamodels’ evolution can be tedious, error-prone, and time-consuming. On the other hand, the automated co-evolution of metamodels/models is challenging especially when new semantics is introduced to the metamodels. In this paper, we propose an interactive multi-objective approach that dynamically adapts and interactively suggests edit operations to developers and takes their feedback into consideration. Our approach uses NSGA-II to find a set of good edit operation sequences that minimizes the number of conformance errors, maximizes the similarity with the initial model (reduce the loss of information) and minimizes the number of proposed edit operations. The designer can approve, modify, or reject each of the recommended edit operations, and this feedback is then used to update the proposed rankings of recommended edit operations. We evaluated our approach on a set of metamodel/model coevolution case studies and compared it to fully automated coevolution techniques. |
Rodriguez-Echeverria, R; Izquierdo, Canovas J; Cabot, Jordi; Wimmer, Manuel An LSP infrastructure to build EMF language servers for web-deployable model editors Inproceedings Proceedings of the Second Workshop on Model-Driven Engineering Tools (MDETools 2018), 2018. @inproceedings{Echeverria2018lspemf, title = {An LSP infrastructure to build EMF language servers for web-deployable model editors}, author = {R. Rodriguez-Echeverria and J. Canovas Izquierdo and Jordi Cabot and Manuel Wimmer}, year = {2018}, date = {2018-10-15}, booktitle = {Proceedings of the Second Workshop on Model-Driven Engineering Tools (MDETools 2018)}, abstract = {The development of modern IDEs is still a challenging and time-consuming task, which requires implementing the support for language-specific features such as syntax highlighting or validation. When the IDE targets a graphical language, its development becomes even more complex due to the renderingand manipulation of the graphical notation symbols. To simplify the development of IDEs, the Language Server Protocol (LSP) proposes a decoupled approach based on language-agnostic clients and language-specific servers. LSP clients communicate changes to LSP servers, which validate and store language in-stances. However, LSP only addresses textual languages (i.e., character as atomic unit) and neglects the support for graphical ones (i.e., nodes/edges as atomic units). In this paper, we introduce a novel LSP infrastructure to simplify the development of new graphical modeling tools, in which Web technologies may be used for editor front-ends while leveraging existing modeling frameworks to build language servers. 
More concretely, in this work, we present the architecture of our LSP infrastructure, based on LSP4J, to build EMF-based graphical language servers.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The development of modern IDEs is still a challenging and time-consuming task, which requires implementing the support for language-specific features such as syntax highlighting or validation. When the IDE targets a graphical language, its development becomes even more complex due to the rendering and manipulation of the graphical notation symbols. To simplify the development of IDEs, the Language Server Protocol (LSP) proposes a decoupled approach based on language-agnostic clients and language-specific servers. LSP clients communicate changes to LSP servers, which validate and store language instances. However, LSP only addresses textual languages (i.e., character as atomic unit) and neglects the support for graphical ones (i.e., nodes/edges as atomic units). In this paper, we introduce a novel LSP infrastructure to simplify the development of new graphical modeling tools, in which Web technologies may be used for editor front-ends while leveraging existing modeling frameworks to build language servers. More concretely, in this work, we present the architecture of our LSP infrastructure, based on LSP4J, to build EMF-based graphical language servers. |
Bordeleau, F; Combemale, Benoit; Eramo, R; den Brand, Van M; Wimmer, Manuel Tool-Support of Socio-Technical Coordination in the Context of Heterogeneous Modeling Inproceedings Proceedings of the 6th International Workshop on The Globalization of Modeling Languages (GEMOC), 2018. @inproceedings{Bordeleau2018stc, title = {Tool-Support of Socio-Technical Coordination in the Context of Heterogeneous Modeling}, author = {F. Bordeleau and Benoit Combemale and R. Eramo and M. Van den Brand and Manuel Wimmer}, year = {2018}, date = {2018-10-15}, booktitle = {Proceedings of the 6th International Workshop on The Globalization of Modeling Languages (GEMOC)}, abstract = {The growing complexity of everyday life systems (and devices) over the last decades has forced the industry to use and investigate different development techniques to manage the many different aspects of the systems. In this context, the use of model driven engineering (MDE) has emerged and is now common practice for many engineering disciplines. However, this comes with important challenges. A set of main challenges relates to the fact that different modeling techniques, languages, and tools are required to deal with the different system aspects, and that support is required to ensure consistence and coherence between the different models. This paper identifies a number of the challenges and paints a roadmap on how tooling can support a multi-model integrated way of working.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The growing complexity of everyday life systems (and devices) over the last decades has forced the industry to use and investigate different development techniques to manage the many different aspects of the systems. In this context, the use of model driven engineering (MDE) has emerged and is now common practice for many engineering disciplines. However, this comes with important challenges. 
A set of main challenges relates to the fact that different modeling techniques, languages, and tools are required to deal with the different system aspects, and that support is required to ensure consistence and coherence between the different models. This paper identifies a number of the challenges and paints a roadmap on how tooling can support a multi-model integrated way of working. |
Ciccozzi, F; Famelis, M; Kappel, Gerti; Lambers, L; Paige, R; Pierantonio, A; Rensink, A; Taentzer, G; Vallecillo, Antonio; Wimmer, Manuel Towards a Body of Knowledge for Model-Based Software Engineering Inproceedings Proceedings of the 14th Educators Symposium at MODELS, 2018. @inproceedings{Ciccozzi2018bkmbse, title = {Towards a Body of Knowledge for Model-Based Software Engineering}, author = {F. Ciccozzi and M. Famelis and Gerti Kappel and L. Lambers and R. Paige and A. Pierantonio and A. Rensink and G. Taentzer and Antonio Vallecillo and Manuel Wimmer}, year = {2018}, date = {2018-10-15}, booktitle = {Proceedings of the 14th Educators Symposium at MODELS}, abstract = {Model-based Software Engineering (MBSE) is now accepted as a Software Engineering (SE) discipline and is being taught as part of more general SE curricula. However, an agreed core of concepts,mechanisms and practices — which constitutes the Body of Knowledge of a discipline — has not been captured anywhere, and is only partially covered by the SE Body of Knowledge (SWEBOK). With the goals of characterizing the contents of the MBSE discipline,promoting a consistent view of it worldwide, clarifying its scope with regard to other SE disciplines, and defining a foundation for a curriculum development on MBSE, this paper provides a proposal for an extension of the contents of SWEBOK with the set of fundamental concepts, terms and mechanisms that should constitute the MBSE Body of Knowledge.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Model-based Software Engineering (MBSE) is now accepted as a Software Engineering (SE) discipline and is being taught as part of more general SE curricula. However, an agreed core of concepts,mechanisms and practices — which constitutes the Body of Knowledge of a discipline — has not been captured anywhere, and is only partially covered by the SE Body of Knowledge (SWEBOK). 
With the goals of characterizing the contents of the MBSE discipline, promoting a consistent view of it worldwide, clarifying its scope with regard to other SE disciplines, and defining a foundation for a curriculum development on MBSE, this paper provides a proposal for an extension of the contents of SWEBOK with the set of fundamental concepts, terms and mechanisms that should constitute the MBSE Body of Knowledge. |
Pierantonio, A; Ciccozzi, F; Famelis, M; Kappel, Gerti; Lambers, L; Mosser, S; Paige, R; Rensink, A; Salay, R; Taentzer, G; Vallecillo, Antonio; Wimmer, Manuel How do we teach Modelling and Model-Driven Engineering? A survey Inproceedings Proceedings of the 14th Educators Symposium at MODELS, 2018. @inproceedings{Pierantonio2018teach, title = {How do we teach Modelling and Model-Driven Engineering? A survey}, author = {A. Pierantonio and F. Ciccozzi and M. Famelis and Gerti Kappel and L. Lambers and S. Mosser and R. Paige and A. Rensink and R. Salay and G. Taentzer and Antonio Vallecillo and Manuel Wimmer}, year = {2018}, date = {2018-10-15}, booktitle = {Proceedings of the 14th Educators Symposium at MODELS}, abstract = {Understanding the experiences of instructors teaching modelling and model-driven engineering is of great relevance to determining how MDE courses should be managed in terms of content, assessment, and teaching methods. In this paper, we report the results of a survey of 47 instructors in this field. Questions address course content, tools and technologies used, as well as positive and negative factors affecting learning outcomes. We analyse the results and summarise key findings with the potential of improving the state of teaching and learning practices. The survey is a preliminary effort in giving a structured overview on the state-of-the-practice within teaching modeling and model-driven engineering (from the point of view of the instructor).}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Understanding the experiences of instructors teaching modelling and model-driven engineering is of great relevance to determining how MDE courses should be managed in terms of content, assessment, and teaching methods. In this paper, we report the results of a survey of 47 instructors in this field. Questions address course content, tools and technologies used, as well as positive and negative factors affecting learning outcomes. 
We analyse the results and summarise key findings with the potential of improving the state of teaching and learning practices. The survey is a preliminary effort in giving a structured overview on the state-of-the-practice within teaching modeling and model-driven engineering (from the point of view of the instructor). |
Isakovic, Haris; Ratasich, Denise; Hirsch, Christian; Platzer, Michael; Wally, Bernhard; Rausch, Thomas; Nickovic, Dejan; Krenn, Willibald; Kappel, Gerti; Dustdar, Schahram; Grosu, Radu CPS/IoT Ecosystem: a platform for research and education Inproceedings Proceedings of the 14th Workshop on Embedded and Cyber-Physical Systems Education (WESE 2018), 2018. @inproceedings{Isakovic2018cpsiot, title = {CPS/IoT Ecosystem: a platform for research and education}, author = {Haris Isakovic and Denise Ratasich and Christian Hirsch and Michael Platzer and Bernhard Wally and Thomas Rausch and Dejan Nickovic and Willibald Krenn and Gerti Kappel and Schahram Dustdar and Radu Grosu}, year = {2018}, date = {2018-10-05}, booktitle = {Proceedings of the 14th Workshop on Embedded and Cyber-Physical Systems Education (WESE 2018)}, abstract = {The CPS/IoT Ecosystem project aims to build an IoT infrastructure that will be used as a platform for research and education in multiple disciplines related to CPS and IoT. The main objective is to provide a real-world infrastructure, and allow students and researchers explore its capabilities on actual use cases.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The CPS/IoT Ecosystem project aims to build an IoT infrastructure that will be used as a platform for research and education in multiple disciplines related to CPS and IoT. The main objective is to provide a real-world infrastructure, and allow students and researchers explore its capabilities on actual use cases. |
Kessentini, Wael; Sahraoui, Houari; Wimmer, Manuel Automated Co-Evolution of Metamodels and Transformation Rules: A Search-Based Approach Inproceedings Proceedings of the 10th Symposium on Search-Based Software Engineering, 2018. @inproceedings{Kessentini2018ace, title = {Automated Co-Evolution of Metamodels and Transformation Rules: A Search-Based Approach}, author = {Wael Kessentini and Houari Sahraoui and Manuel Wimmer}, year = {2018}, date = {2018-09-09}, booktitle = {Proceedings of the 10th Symposium on Search-Based Software Engineering}, abstract = {Metamodels frequently change over time by adding new concepts or changing existing ones to keep track with the evolving problem domain they aim to capture. This evolution process impacts several depending artifacts such as model instances, constraints, as well as transformation rules. As a consequence, these artifacts have to be co-evolved to ensure their conformance with new metamodel versions. While several studies addressed the problem of metamodel/- model co-evolution3, the co-evolution of metamodels and transformation rules has been less studied. Currently, programmers have to manually change model transformations to make them consistent with the new metamodel versions which require the detection of which transformations to modify and how to properly change them. In this paper, we propose a novel search-based approach to recommend transformation rule changes to make transformations coherent with the new metamodel versions by finding a trade-off between maximizing the coverage of metamodel changes and minimizing the number of static errors in the transformation and the number of applied changes to the transformation. 
We implemented our approach for the ATLAS Transformation Language (ATL) and validated the proposed approach on four co-evolution case studies. We demonstrate the outperformance of our approach by comparing the quality of the automatically generated co-evolution solutions by NSGA-II with manually revised transformations, one mono-objective algorithm, and random search.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Metamodels frequently change over time by adding new concepts or changing existing ones to keep track with the evolving problem domain they aim to capture. This evolution process impacts several depending artifacts such as model instances, constraints, as well as transformation rules. As a consequence, these artifacts have to be co-evolved to ensure their conformance with new metamodel versions. While several studies addressed the problem of metamodel/model co-evolution, the co-evolution of metamodels and transformation rules has been less studied. Currently, programmers have to manually change model transformations to make them consistent with the new metamodel versions which require the detection of which transformations to modify and how to properly change them. In this paper, we propose a novel search-based approach to recommend transformation rule changes to make transformations coherent with the new metamodel versions by finding a trade-off between maximizing the coverage of metamodel changes and minimizing the number of static errors in the transformation and the number of applied changes to the transformation. We implemented our approach for the ATLAS Transformation Language (ATL) and validated the proposed approach on four co-evolution case studies. We demonstrate the outperformance of our approach by comparing the quality of the automatically generated co-evolution solutions by NSGA-II with manually revised transformations, one mono-objective algorithm, and random search. |
Wally, Bernhard; Huemer, Christian; Mazak, Alexandra; Wimmer, Manuel A Variability Information Model for OPC UA Inproceedings Proceedings of the 23rd IEEE International Conference on Emerging Technologies and Factory Automation (ETFA 2018), 2018. @inproceedings{Wally2018Variability, title = {A Variability Information Model for OPC UA}, author = {Bernhard Wally and Christian Huemer and Alexandra Mazak and Manuel Wimmer}, year = {2018}, date = {2018-09-06}, booktitle = {Proceedings of the 23rd IEEE International Conference on Emerging Technologies and Factory Automation (ETFA 2018)}, abstract = {OPC Unified Architecture (UA) is a powerful technology for modeling and instantiating domain-specific information in a standardized manner. Its initial application scenario is in the domain of automated production systems, that increasingly have to deal with variability information, (i) regarding the products being manufactured and (ii) regarding the production systems themselves. In this work we propose a non-intrusive OPC UA information model for the modeling and querying of variability information using feature models, which are a well-known paradigm in the management of software product lines. Our information model can be applied “aside” existing domain information without interfering with their internal structure.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } OPC Unified Architecture (UA) is a powerful technology for modeling and instantiating domain-specific information in a standardized manner. Its initial application scenario is in the domain of automated production systems, that increasingly have to deal with variability information, (i) regarding the products being manufactured and (ii) regarding the production systems themselves. 
In this work we propose a non-intrusive OPC UA information model for the modeling and querying of variability information using feature models, which are a well-known paradigm in the management of software product lines. Our information model can be applied “aside” existing domain information without interfering with their internal structure. |
Lueder, Arndt; Wimmer, Manuel Modelling Consistency Rules within Production System Engineering Inproceedings Proceedings of the 14th International Conference on Automation Science and Engineering (CASE 2018), 2018. @inproceedings{Lueder2018mcr, title = {Modelling Consistency Rules within Production System Engineering}, author = {Arndt Lueder and Manuel Wimmer}, year = {2018}, date = {2018-08-24}, booktitle = {Proceedings of the 14th International Conference on Automation Science and Engineering (CASE 2018)}, abstract = {The engineering of control systems is an essential part within the engineering of production systems cumulating various predecessor engineering activities. Therefore a high data quality of the predecessor activities has to be ensured especially avoiding inconsistencies between provided sets of engineering data. Within this paper, a methodology is sketched applicable to model engineering discipline crossing consistency rules to enable an automatic evaluation for consistency management. It is based on the use of AutomationML as production system modelling language but can be generalized to further modelling means.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The engineering of control systems is an essential part within the engineering of production systems cumulating various predecessor engineering activities. Therefore a high data quality of the predecessor activities has to be ensured especially avoiding inconsistencies between provided sets of engineering data. Within this paper, a methodology is sketched applicable to model engineering discipline crossing consistency rules to enable an automatic evaluation for consistency management. It is based on the use of AutomationML as production system modelling language but can be generalized to further modelling means. |
Wimmer, Manuel; Mazak, Alexandra From AutomationML to AutomationQL: A By-Example Query Language for CPPS Engineering Models Inproceedings Proceedings of the 14th International Conference on Automation Science and Engineering (CASE 2018), 2018. @inproceedings{Wimmer2018amlaql, title = {From AutomationML to AutomationQL: A By-Example Query Language for CPPS Engineering Models}, author = {Manuel Wimmer and Alexandra Mazak}, year = {2018}, date = {2018-08-24}, booktitle = {Proceedings of the 14th International Conference on Automation Science and Engineering (CASE 2018)}, abstract = {Model-based engineering is an emerging paradigm to deal with the complexity of multi-disciplinary engineering in CPPS projects. In such projects, different kinds of models are created during the lifecycle of a production system. AutomationML is a promising standard to provide a unifying format to represent and connect the different engineering models. Dedicated tool support has been developed for AutomationML in the last years to create and evolve models. However, when it comes to querying AutomationML models, implementation-related query languages have to be currently used. These languages have a certain complexity as they are not directly based on the concepts of AutomationML but on the underlying technological concepts and encodings of AutomationML. This often hinders the formulation of automatically executable queries by domain experts. In this paper, we propose a dedicated query language for AutomationML called Automation Query Language (AutomationQL) which is directly derived from AutomationML. Using this query language, queries can be defined in a by-example manner which allows engineers to formulate queries in terms of AutomationML concepts instead of switching to an implementation-oriented query language. 
We illustrate how AutomationQL is defined, how queries can be formulated as well as how tool support is provided to automatically evaluate the queries and represent their results. Finally, we contrast our solution with existing query languages and derive a roadmap for future research on AutomationQL.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Model-based engineering is an emerging paradigm to deal with the complexity of multi-disciplinary engineering in CPPS projects. In such projects, different kinds of models are created during the lifecycle of a production system. AutomationML is a promising standard to provide a unifying format to represent and connect the different engineering models. Dedicated tool support has been developed for AutomationML in the last years to create and evolve models. However, when it comes to querying AutomationML models, implementation-related query languages have to be currently used. These languages have a certain complexity as they are not directly based on the concepts of AutomationML but on the underlying technological concepts and encodings of AutomationML. This often hinders the formulation of automatically executable queries by domain experts. In this paper, we propose a dedicated query language for AutomationML called Automation Query Language (AutomationQL) which is directly derived from AutomationML. Using this query language, queries can be defined in a by-example manner which allows engineers to formulate queries in terms of AutomationML concepts instead of switching to an implementation-oriented query language. We illustrate how AutomationQL is defined, how queries can be formulated as well as how tool support is provided to automatically evaluate the queries and represent their results. Finally, we contrast our solution with existing query languages and derive a roadmap for future research on AutomationQL. |
Wally, Bernhard; Huemer, Christian; Mazak, Alexandra; Wimmer, Manuel AutomationML, ISA-95 and Others: Rendezvous in the OPC UA Universe Inproceedings Proceedings of the 14th IEEE International Conference on Automation Science and Engineering (CASE 2018), 2018. @inproceedings{Wally2018Rendenvous, title = {AutomationML, ISA-95 and Others: Rendezvous in the OPC UA Universe}, author = {Bernhard Wally and Christian Huemer and Alexandra Mazak and Manuel Wimmer}, year = {2018}, date = {2018-08-20}, booktitle = {Proceedings of the 14th IEEE International Conference on Automation Science and Engineering (CASE 2018)}, abstract = {OPC Unified Architecture (UA) is a powerful and versatile platform for hosting information from a large variety of domains. In some cases, the domain-specific information models provide overlapping information, such as (i) different views on a specific entity or (ii) different levels of detail of a single entity. Emerging from a multi-disciplinary engineering process, these different views can stem from various tools that have been used to deal with that entity, or from different stages in an engineering process, e.g., from requirements engineering over system design and implementation to operations. In this work, we provide a small but expressive set of OPC UA reference types that unobtrusively allow the persistent instantiation of additional knowledge with respect to relations between OPC UA nodes. We will show the application of these reference types on the basis of a rendezvous of AutomationML and ISA-95 in an OPC UA server.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } OPC Unified Architecture (UA) is a powerful and versatile platform for hosting information from a large variety of domains. In some cases, the domain-specific information models provide overlapping information, such as (i) different views on a specific entity or (ii) different levels of detail of a single entity. 
Emerging from a multi-disciplinary engineering process, these different views can stem from various tools that have been used to deal with that entity, or from different stages in an engineering process, e.g., from requirements engineering over system design and implementation to operations. In this work, we provide a small but expressive set of OPC UA reference types that unobtrusively allow the persistent instantiation of additional knowledge with respect to relations between OPC UA nodes. We will show the application of these reference types on the basis of a rendezvous of AutomationML and ISA-95 in an OPC UA server. |
Leroy, Dorian; Bousse, Erwan; Megna, A; Combemale, Benoit; Wimmer, Manuel Trace Comprehension Operators for Executable DSLs Inproceedings Proceedings of the 14th European Conference on Modelling Foundations and Applications (ECMFA 2018), pp. 293-310, 2018, ISBN: 978-3-319-92996-5. @inproceedings{Leroy2018tco, title = {Trace Comprehension Operators for Executable DSLs}, author = {Dorian Leroy and Erwan Bousse and A. Megna and Benoit Combemale and Manuel Wimmer}, doi = {10.1007/978-3-319-92997-2_19}, isbn = {978-3-319-92996-5}, year = {2018}, date = {2018-06-28}, booktitle = {Proceedings of the 14th European Conference on Modelling Foundations and Applications (ECMFA 2018)}, pages = {293-310}, abstract = {Recent approaches contribute facilities to breathe life into metamodels, thus making behavioral models directly executable. Such facilities are particularly helpful to better utilize a model over the time dimension, e.g., for early validation and verification. However, when even a small change is made to the model, to the language definition (e.g., semantic variation points), or to the external stimuli of an execution scenario, it remains difficult for a designer to grasp the impact of such a change on the resulting execution trace. This prevents accessible trade-off analysis and design-space exploration on behavioral models. In this paper, we propose a set of formally defined operators for analyzing execution traces. The operators include dynamic trace filtering, trace comparison with diff computation and visualization, and graph-based view extraction to analyze cycles. The operators are applied and validated on a demonstrative example that highlights their usefulness for the comprehension of specific aspects of the underlying traces.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Recent approaches contribute facilities to breathe life into metamodels, thus making behavioral models directly executable. 
Such facilities are particularly helpful to better utilize a model over the time dimension, e.g., for early validation and verification. However, when even a small change is made to the model, to the language definition (e.g., semantic variation points), or to the external stimuli of an execution scenario, it remains difficult for a designer to grasp the impact of such a change on the resulting execution trace. This prevents accessible trade-off analysis and design-space exploration on behavioral models. In this paper, we propose a set of formally defined operators for analyzing execution traces. The operators include dynamic trace filtering, trace comparison with diff computation and visualization, and graph-based view extraction to analyze cycles. The operators are applied and validated on a demonstrative example that highlights their usefulness for the comprehension of specific aspects of the underlying traces. |
Burgueño, Loli; Mayerhofer, Tanja; Wimmer, Manuel; Vallecillo, Antonio Using Physical Quantities in Robot Software Models Inproceedings Proceedings of the 1st International Workshop on Robotics Software Engineering (RoSE@ICSE 2018), pp. 23-28, 2018. @inproceedings{Burgueno2018upq, title = {Using Physical Quantities in Robot Software Models}, author = {Loli Burgueño and Tanja Mayerhofer and Manuel Wimmer and Antonio Vallecillo}, doi = {10.1145/3196558.3196562}, year = {2018}, date = {2018-05-28}, booktitle = {Proceedings of the 1st International Workshop on Robotics Software Engineering (RoSE@ICSE 2018)}, pages = {23-28}, abstract = {One of the challenges of modeling any software application that deals with real-world physical systems resides in the correct representation of numerical values and their units. This paper shows how both measurement uncertainty and units can be effectively incorporated into software models, becoming part of their basic type systems, and illustrates this approach in the particular case of a robot language. We show how our approach allows robot modelers to safely represent and manipulate units and measurement uncertainties of the robots and their elements in a natural manner, statically ensuring unit-safe assignments and operations, as well as the propagation of uncertainty in the computations of derived attributes and operations.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } One of the challenges of modeling any software application that deals with real-world physical systems resides in the correct representation of numerical values and their units. This paper shows how both measurement uncertainty and units can be effectively incorporated into software models, becoming part of their basic type systems, and illustrates this approach in the particular case of a robot language. 
We show how our approach allows robot modelers to safely represent and manipulate units and measurement uncertainties of the robots and their elements in a natural manner, statically ensuring unit-safe assignments and operations, as well as the propagation of uncertainty in the computations of derived attributes and operations. |
Mazak, Alexandra; Wimmer, Manuel; Patsuk-Boesch, Polina Execution-based Model Profiling Inproceedings Post-Proceedings of the 6th International Symposium on Data-Driven Process Discovery and Analysis, pp. 37-52, Springer International Publishing, Cham, 2018, ISBN: 978-3-319-74160-4. @inproceedings{Mazak2017ebmp, title = {Execution-based Model Profiling}, author = {Alexandra Mazak and Manuel Wimmer and Polina Patsuk-Boesch}, doi = {10.1007/978-3-319-74161-1_3}, isbn = {978-3-319-74160-4}, year = {2018}, date = {2018-01-26}, booktitle = {Post-Proceedings of the 6th International Symposium on Data-Driven Process Discovery and Analysis}, volume = {307}, pages = {37-52}, publisher = {Springer International Publishing}, address = {Cham}, series = {Lecture Notes in Business Information Processing}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
2017 |
Wolny, Sabine; Mazak, Alexandra; Konlechner, Rafael; Wimmer, Manuel Towards Continuous Behavior Mining Inproceedings Ceravolo, P; Keulen, Van M; Stoffel, K (Ed.): CEUR Workshop Proceedings: Proceedings of the 7th International Symposium on Data-driven Process Discovery and Analysis (SIMPDA 2017), pp. 149-150, 2017, ISSN: 1613-0073. @inproceedings{Wolny2017, title = {Towards Continuous Behavior Mining}, author = {Sabine Wolny and Alexandra Mazak and Rafael Konlechner and Manuel Wimmer}, editor = {P. Ceravolo and M. Van Keulen and K. Stoffel}, url = {http://ceur-ws.org/Vol-2016/paper13.pdf}, issn = {1613-0073}, year = {2017}, date = {2017-12-07}, booktitle = {CEUR Workshop Proceedings: Proceedings of the 7th International Symposium on Data-driven Process Discovery and Analysis (SIMPDA 2017)}, volume = {Vol-2016}, pages = {149-150}, abstract = {With new advances in Cyber-Physical Systems (CPS) and Internet of Things (IoT), more and more discrete software controllers interact with continuous physical systems. Workflow models are a classical approach to define controllers. However, the effect of the associated actions that are activated by executing the workflow may not spontaneously be realized but have to be realized over time. Generally, behavioral model elements such as activities in workflow languages are displayed mostly as black box, meaning that it is not possible to trace variable changes over time in most of the classical modeling approaches. In this paper, we introduce an envisioned architecture to cope with this challenge.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } With new advances in Cyber-Physical Systems (CPS) and Internet of Things (IoT), more and more discrete software controllers interact with continuous physical systems. Workflow models are a classical approach to define controllers. 
However, the effect of the associated actions that are activated by executing the workflow may not spontaneously be realized but have to be realized over time. Generally, behavioral model elements such as activities in workflow languages are displayed mostly as black box, meaning that it is not possible to trace variable changes over time in most of the classical modeling approaches. In this paper, we introduce an envisioned architecture to cope with this challenge. |
Wally, Bernhard; Huemer, Christian; Mazak, Alexandra Aligning Business Services with Production Services: The Case of REA and ISA-95 Inproceedings Proceedings of the 10th IEEE International Conference on Service Oriented Computing and Applications (SOCA 2017), 2017. @inproceedings{Wally2017abs, title = {Aligning Business Services with Production Services: The Case of REA and ISA-95}, author = {Bernhard Wally and Christian Huemer and Alexandra Mazak}, doi = {10.1109/SOCA.2017.10}, year = {2017}, date = {2017-11-22}, booktitle = {Proceedings of the 10th IEEE International Conference on Service Oriented Computing and Applications (SOCA 2017)}, abstract = {"Industrie 4.0" aims at flexible production networks that require horizontal integration across companies. Evidently, any production related information exchanged in the network must be vertically forwarded to the corresponding service endpoints of the local production system. Accordingly, there is a need to align information that flows between companies and within each company. The Resource-Event-Agent (REA) business ontology describes a metamodel for internal business activities (e.g., production) and for inter-organizational exchange constellations on the enterprise resource planning (ERP) level. ISA-95 is a series of standards targeting the integration of enterprise control systems on the interface between ERP systems and manufacturing execution systems. Consequently, we align elements of REA and ISA-95 and define conversion rules for the transformation of elements from one system to the other. By interleaving the semantics of both standards, we formally strengthen the links between the services of the business level and the production level, and support multi-system adaptation in flexible production environments.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } "Industrie 4.0" aims at flexible production networks that require horizontal integration across companies. 
Evidently, any production related information exchanged in the network must be vertically forwarded to the corresponding service endpoints of the local production system. Accordingly, there is a need to align information that flows between companies and within each company. The Resource-Event-Agent (REA) business ontology describes a metamodel for internal business activities (e.g., production) and for inter-organizational exchange constellations on the enterprise resource planning (ERP) level. ISA-95 is a series of standards targeting the integration of enterprise control systems on the interface between ERP systems and manufacturing execution systems. Consequently, we align elements of REA and ISA-95 and define conversion rules for the transformation of elements from one system to the other. By interleaving the semantics of both standards, we formally strengthen the links between the services of the business level and the production level, and support multi-system adaptation in flexible production environments. |
Mazak, Alexandra; Wimmer, Manuel Sequence Pattern Mining: Automatisches Erkennen und Auswerten von Interaktionsmustern zwischen technischen Assets basierend auf SysML-Sequenzdiagrammen Inproceedings Proceedings of Tag des Software Engineerings (TdSE 2017), Paderborn, 2017. @inproceedings{Mazak2017tdse, title = {Sequence Pattern Mining: Automatisches Erkennen und Auswerten von Interaktionsmustern zwischen technischen Assets basierend auf SysML-Sequenzdiagrammen}, author = {Alexandra Mazak and Manuel Wimmer}, year = {2017}, date = {2017-11-09}, booktitle = {Proceedings of Tag des Software Engineerings (TdSE 2017)}, address = {Paderborn}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
Bill, Robert; Neubauer, Patrick; Wimmer, Manuel Virtual Textual Model Composition for Supporting Versioning and Aspect-Orientation Inproceedings Proceedings of the 10th ACM SIGPLAN International Conference on Software Language Engineering (SLE), ACM, 2017. @inproceedings{Wimmer23.1, title = {Virtual Textual Model Composition for Supporting Versioning and Aspect-Orientation}, author = {Robert Bill and Patrick Neubauer and Manuel Wimmer}, doi = {10.1234/5678}, year = {2017}, date = {2017-10-20}, booktitle = {Proceedings of the 10th ACM SIGPLAN International Conference on Software Language Engineering (SLE)}, publisher = {ACM}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
Books
2017 |
Brambilla, Marco; Cabot, Jordi; Wimmer, Manuel Model-Driven Software Engineering in Practice Book 2, 2017, ISBN: 9781627057080. @book{Brambilla2017mdse, title = {Model-Driven Software Engineering in Practice}, author = {Marco Brambilla and Jordi Cabot and Manuel Wimmer}, isbn = {9781627057080}, year = {2017}, date = {2017-01-01}, edition = {2}, keywords = {}, pubstate = {published}, tppubtype = {book} } |
Book Chapters
2017 |
Berardinelli, Luca; Mazak, Alexandra; Alt, Oliver; Wimmer, Manuel; Kappel, Gerti Model-Driven Systems Engineering: Principles and Applications in the CPPS Domain Book Chapter Biffl, Stefan; Lueder, Arndt; Gerhard, Detlef (Ed.): Multi-Disciplinary Engineering for Cyber-Physical Production Systems: Data Models and Software Solutions for Handling Complex Engineering Projects, pp. 261-299, Springer International Publishing, 2017, ISBN: 978-3-319-56345-9. @inbook{Berardinelli2017mdse, title = {Model-Driven Systems Engineering: Principles and Applications in the CPPS Domain}, author = {Luca Berardinelli and Alexandra Mazak and Oliver Alt and Manuel Wimmer and Gerti Kappel}, editor = {Stefan Biffl and Arndt Lueder and Detlef Gerhard}, doi = {10.1007/978-3-319-56345-9_11}, isbn = {978-3-319-56345-9}, year = {2017}, date = {2017-05-07}, booktitle = {Multi-Disciplinary Engineering for Cyber-Physical Production Systems: Data Models and Software Solutions for Handling Complex Engineering Projects}, pages = {261-299}, publisher = {Springer International Publishing}, abstract = {To engineer large, complex, and interdisciplinary systems, modeling is considered as the universal technique to understand and simplify reality through abstraction, and thus, models are in the center as the most important artifacts throughout interdisciplinary activities within model-driven engineering processes. Model-Driven Systems Engineering (MDSE) is a systems engineering paradigm that promotes the systematic adoption of models throughout the engineering process by identifying and integrating appropriate concepts, languages, techniques, and tools. This chapter discusses current advances as well as challenges towards the adoption of model-driven approaches in cyber-physical production systems (CPPS) engineering. 
In particular, we discuss how modeling standards, modeling languages, and model transformations are employed to support current systems engineering processes in the CPPS domain, and we show their integration and application based on a case study concerning a lab-sized production system. The major outcome of this case study is the realization of an automated engineering tool chain, including the languages SysML, AML, and PMIF, to perform early design and validation.}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } To engineer large, complex, and interdisciplinary systems, modeling is considered as the universal technique to understand and simplify reality through abstraction, and thus, models are in the center as the most important artifacts throughout interdisciplinary activities within model-driven engineering processes. Model-Driven Systems Engineering (MDSE) is a systems engineering paradigm that promotes the systematic adoption of models throughout the engineering process by identifying and integrating appropriate concepts, languages, techniques, and tools. This chapter discusses current advances as well as challenges towards the adoption of model-driven approaches in cyber-physical production systems (CPPS) engineering. In particular, we discuss how modeling standards, modeling languages, and model transformations are employed to support current systems engineering processes in the CPPS domain, and we show their integration and application based on a case study concerning a lab-sized production system. The major outcome of this case study is the realization of an automated engineering tool chain, including the languages SysML, AML, and PMIF, to perform early design and validation. |
Whitepapers
2018 |
Wally, Bernhard Provisioning for MES and ERP - Support for IEC 62264-2 and B2MML Technical Manual TU Wien, AutomationML e.V. 2018. @manual{Wally2018AR, title = {Provisioning for MES and ERP - Support for IEC 62264-2 and B2MML}, author = {Bernhard Wally}, url = {https://www.automationml.org/o.red.c/news-236.html}, year = {2018}, date = {2018-07-06}, organization = {TU Wien, AutomationML e.V.}, keywords = {}, pubstate = {published}, tppubtype = {manual} } |
Theses
2020 |
Lehner Daniel Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: DI Sabine Wolny Model-based Detection of Runtime Inconsistencies Masters Thesis TU Wien, Fakultät für Informatik, 2020. @mastersthesis{Lehner2020, title = {Model-based Detection of Runtime Inconsistencies}, author = {Lehner Daniel, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: DI Sabine Wolny}, year = {2020}, date = {2020-09-01}, address = {Fakultät für Informatik}, school = {TU Wien, Fakultät für Informatik}, abstract = {With an increasing dependency on software in more and more aspects of our lives, the information whether these software systems work as expected at any point in time becomes crucial. However, limitations of existing approaches lead to a loss of information about consistency between expected and actual behavior, especially after system deployment. (1) Full validation of software artifacts is often not possible any longer in modern software systems. Increasing complexity leads to an exploding number of execution possibilities, which can be infeasible to compute and check. (2) Software testing allows to overcome this challenge by allowing to check particular parts of a system, but with limitations when it comes to consistency after system deployment. (3) Runtime monitoring systems provide information on system errors after deployment, but the high manual effort required to set up such a system limits its application in practice. In this thesis, it is examined to which extent reusing existing model information can provide a solution to the challenges faced with the approaches mentioned above. Therefore, the Inconsistency Detection Based on Models (IDBoM) framework is implemented as a showcase how existing model information can be used for inconsistency checking. The IDBoM framework uses UML Activity Diagrams and UML Class Diagrams as a representation of the expected behavior of a system, and UML Sequence Diagrams as representation of its actual behavior after deployment. 
After the inconsistency checking process is triggered by a running system, a set of consistency rules is executed on these diagrams to retrieve a checking result. This checking result indicates whether the actual behavior of the checked system is consistent with its expected behavior. In order to facilitate the consistency between expected system behavior and existing models, a reusable solution for programmatically interacting with models with the focus on usability is created as part of the IDBoM framework. The intention of this solution is to contribute to the implementation of use cases for reusing existing model information, to increase benefits of keeping models up to date. An evaluation of the implemented artifact shows that full automation of a model-based inconsistency checking process is possible, from process initiation to processing of the checking result. The covered inconsistency cases are comparable to software testing. Execution times of the implemented inconsistency checking process scale linearly for all tested parameters. Finally, the usability of model interactions is improved compared to existing solutions on all tested parameters. These results indicate that a model-based inconsistency checking process can be used to provide information about the correct behavior of a system after deployment.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } With an increasing dependency on software in more and more aspects of our lives, the information whether these software systems work as expected at any point in time becomes crucial. However, limitations of existing approaches lead to a loss of information about consistency between expected and actual behavior, especially after system deployment. (1) Full validation of software artifacts is often not possible any longer in modern software systems. Increasing complexity leads to an exploding number of execution possibilities, which can be infeasible to compute and check. 
(2) Software testing allows to overcome this challenge by allowing to check particular parts of a system, but with limitations when it comes to consistency after system deployment. (3) Runtime monitoring systems provide information on system errors after deployment, but the high manual effort required to set up such a system limits its application in practice. In this thesis, it is examined to which extent reusing existing model information can provide a solution to the challenges faced with the approaches mentioned above. Therefore, the Inconsistency Detection Based on Models (IDBoM) framework is implemented as a showcase how existing model information can be used for inconsistency checking. The IDBoM framework uses UML Activity Diagrams and UML Class Diagrams as a representation of the expected behavior of a system, and UML Sequence Diagrams as representation of its actual behavior after deployment. After the inconsistency checking process is triggered by a running system, a set of consistency rules is executed on these diagrams to retrieve a checking result. This checking result indicates whether the actual behavior of the checked system is consistent with its expected behavior. In order to facilitate the consistency between expected system behavior and existing models, a reusable solution for programmatically interacting with models with the focus on usability is created as part of the IDBoM framework. The intention of this solution is to contribute to the implementation of use cases for reusing existing model information, to increase benefits of keeping models up to date. An evaluation of the implemented artifact shows that full automation of a model-based inconsistency checking process is possible, from process initiation to processing of the checking result. The covered inconsistency cases are comparable to software testing. Execution times of the implemented inconsistency checking process scale linearly for all tested parameters. 
Finally, the usability of model interactions is improved compared to existing solutions on all tested parameters. These results indicate that a model-based inconsistency checking process can be used to provide information about the correct behavior of a system after deployment. |
@phdthesis{Neubauer2020,
  title     = {A Framework for Modernizing Domain-Specific Languages - From {XML} Schema to Consistency-Achieving Editors with Reusable Notations},
  author    = {Neubauer, Patrick},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer},
  doi       = {10.34726/hss.2020.31485},
  year      = {2020},
  date      = {2020-07-08},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {The introduction of Extensible Markup Language (XML) Schema Definitions (XSDs) represented a tremendous leap towards the design of domain-specific languages (DSLs) by enabling machine processibility of domain models conforming to formally described language grammar, i.e. capturing vocabulary and valid sentences. Consequently, it elevated the need for automating the creation and maintenance of dedicated and modern integrated development environments (IDEs) evading inherent XML limitations, such as rigid angle-bracket syntax, as well as enabling the support of valid domain model construction. Techniques and tools provided by model-driven engineering frameworks and language workbench frameworks offer elementary assistance during the initial implementation of a DSL. These frameworks, however, fail to automate DSL generation due to disparities exposed by the transformation and synthesis of XSDs, language grammars, and metamodels. Moreover, fundamental differences in the nature of language grammars and metamodels challenge the construction and application of language notations. Although language workbenches are capable of integrating seamlessly dedicated validators, context assistants, and quick fix providers into domain-specific IDEs, their implementation and maintenance still requires proficient language knowledge and development. This thesis contributes towards addressing the above-mentioned problems. First, it proposes an approach to generate automatically DSL grammars from XSD-based languages by bridging gaps in the transformations of structural components, and by enriching metamodels with structural constraints imposed by XSD restrictions. Second, it proposes an approach to generate automatically domain-specific IDEs with accurate validators, sensible context assistants, and cost-effective quick fix providers by employing search-based software engineering at runtime. Third, it proposes an approach to formulate domain structure-agnostic textual notations for modeling languages by decoupling representational from structural information in grammar definitions, and by providing a language style framework capable of generating grammars from arbitrary metamodels and style specifications. In order to demonstrate the usefulness of the proposed approaches, the developed prototypical implementations are evaluated based on a comprehensive industrial standard for the classification and description of products, a comparison with state-of-the-art language workbench frameworks, integration with model assistance tooling, and individual case studies such as cloud topology and orchestration modeling.},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
@phdthesis{Wally2020,
  title     = {Smart Manufacturing Systems: Model-Driven Integration of {ERP} and {MOM}},
  author    = {Wally, Bernhard},
  note      = {Advisor: a.o.Univ.-Prof. Mag. Dr. Christian Huemer, Co-Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer},
  year      = {2020},
  date      = {2020-06-17},
  address   = {Institute of Information Systems Engineering Business Informatics Group (BIG) Favoritenstraße 9-11/194-3, 1040 Vienna, Austria},
  school    = {TU Wien, Institute of Information Systems Engineering Business Informatics Group (BIG)},
  abstract  = {Automated production systems are following a general technological trend: increasingly complex products, combined with drastically reduced lot-sizes per product variant, as well as shorter response and production times are being demanded. In order to be able to meet these expectations, modern IT systems at all levels of the automation hierarchy are required: from business related software at the corporate management level, down to the programmable logic controllers at the field level. For a well-designed coupling of systems that are located at different levels, it is necessary to find, define, and implement clear data conversion mechanisms - this endeavor is also known as vertical integration. At the same time, it is necessary to automate the inter-organizational data exchange - an aspect of horizontal integration. In this thesis, we are recapitulating a selection of own contributions in the context of information integration for smart manufacturing systems. With respect to conceptual models we have been employing established industrial standards, in order to facilitate industrial application. We have conceptualized, implemented and tested a series of conceptual models, inter-model mappings and transformations. Our approaches and implementations have been successfully evaluated by a number of experiments and case studies and are therefore a contribution towards model-driven smart manufacturing systems.},
  type      = {PhD thesis},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
@mastersthesis{wimmer2020i,
  title     = {Eine Cloud-basierte Programmierumgebung für {QML}},
  author    = {Korath, Christoph},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer},
  url       = {https://permalink.catalogplus.tuwien.at/AC15610144},
  year      = {2020},
  date      = {2020-03-02},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {Das Aufkommen von Cloud-Computing in den letzten Jahren hat die Softwareentwicklung nachhaltig verändert, denn die Einsatzmöglichkeiten der Cloud könnten vielfältiger nicht sein. Ob der Einsatz im Internet of Things, als Basis für eine Streaming-Plattform oder für andere rechenintensive Operationen, das Aufkommen der Cloud hat viele neue Lösungen ermöglicht. Bei all den Möglichkeiten der Cloud scheint es nur logisch, diese als Softwareentwickler und Softwareentwicklerin nicht nur für das eigene Produkt einzusetzen, sondern auch zum eigenen Vorteil während der Softwareentwicklung zu nutzen. Das Konzept hinter dieser Arbeit greift diesen Gedanken auf und versucht, dazu einen Teil der Aufgaben der Softwareentwicklung in der Cloud abzubilden. Dafür soll eine Cloud-basierte Entwicklungsumgebung für QML entstehen. Mit Hilfe dieser soll die Entwicklung von Applikationen für mobile Geräte ermöglicht werden, wobei die Entwicklungsschritte beginnend beim Schreiben der Applikation bis zur Auslieferung dieser über den Cloud-Service abgedeckt werden können. Durch die Einbindung der Cloud sollen lokale Installationen von Entwicklungssoftware verringert und auf längere Sicht obsolet gemacht werden. Durch den Wegfall der lokalen Entwicklungsumgebungen kann eine häufige Fehlerquelle eliminiert werden, die Installation der Software selbst. Fehlerquellen wie diese, die dazu führen können, dass Anwender und Anwenderinnen ein Entwicklungstool nicht effektiv einsetzen können, sind die eigentlichen Hintergründe dieser Arbeit. Das Ergebnis dieser Arbeit zeigt, wie eine funktionierende Alternative zu lokalen Entwicklungsumgebungen aussehen kann. Dazu wurde eine Cloud-basierte IDE entwickelt, welche in ein bereits zum Teil existierendes Cloud-System integriert wurde. In einer eingehenden Recherche wurden zuerst mögliche Optionen für eine adäquate Basis der IDE, die für die weitere Entwicklung eingesetzt werden sollte, abgewogen. Durch Einbringen von Änderungen und durch Einbinden von weiteren Ressourcen wurde die Anwendung GitLab in eine Cloud-basierte IDE umgewandelt. Die Funktionen des Cloud-basierten Ansatzes wurde gemeinsam mit Experten und Expertinnen einer Analyse unterzogen, welche auch dazu genutzt wurde, um mögliche Schwachstellen und Verbesserungspotential zu identifizieren.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
@mastersthesis{wimmer2020,
  title     = {Portierung eines universellen {IoT}-basierten Software Stacks},
  author    = {Mann, Markus},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer},
  url       = {https://permalink.catalogplus.tuwien.at/AC15609862},
  year      = {2020},
  date      = {2020-03-02},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {Derzeit keine Kurzfassung verfügbar.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
@phdthesis{bill2020,
  title     = {Model Integration by Hybrid Model Virtualization},
  author    = {Bill, Robert},
  note      = {Advisor: Prof. Dr. Gerti Kappel, Co-Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer. Handle: http://hdl.handle.net/20.500.12708/1067},
  url       = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-137914},
  year      = {2020},
  date      = {2020-01-31},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {Multiple teams working on a single system may each have different viewpoints, and thus, use different models. These models may have partly shared, unique, or interrelated information, requiring model integration. To work faster and in a more parallel way, temporary inconsistencies between multiple models may be accepted. However, shared information only edited by a single team could still be immediately made known globally. The two main approaches to model integration are model virtualization, i.e., deriving all models from a single source of truth and model synchronization, i.e., propagating changes between different materialized models. While model virtualization does not allow temporary inconsistencies between models, model synchronization may require storing duplicate information redundantly, even if only a single team is involved. Thus, this thesis combines model virtualization with model synchronization into a hybrid approach. A new model virtualization approach helps arbitrarily adding or subtracting models from a base model. The base model can be a single model, an intersection or union of multiple models, a modification of another base model, or a model derivation. As we can store arbitrary (user) changes to the base model without affecting it, we allow temporary inconsistencies and arbitrary changes to the base model, e.g., as a result of changing the derivations source model. Incompatible changes never require user intervention, but just cause semantic constraint violations in a newly defined synchronization model, which is valid if and only if all inter-model constraints including feature derivations are fulfilled. To produce quickfix suggestions in (textual) model editors, optimal model synchronization is regarded as finding an optimal synchronization model. For this optimization, both model finders and heuristic search is employed. Model derivations can be specified using a new basic model derivation language, which includes both derivation and synchronization constraints in a single model. This allows for pure derivation by not editing the derived model as well as pure synchronization by specifying constraints just for inter-model consistency, but not for derivation. This hybrid approach is feasible and can support use cases like editing multiple models simultaneously using virtualization. Our proposed model repair does significantly reduce the number of (synchronization) constraint violations and prevent new ones due to improved autocompletion as shown in our evaluation scenarios.},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
@mastersthesis{wimmer2020h,
  title     = {A Framework for Execution-based Model Profiling},
  author    = {Patsuk-Boesch, Polina},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: DI Mag. Dr. Alexandra Mazak-Huemer. Handle: http://hdl.handle.net/20.500.12708/1190},
  url       = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-135392},
  year      = {2020},
  date      = {2020-01-10},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {In Model-Driven Engineering (MDE) models are put in the center and used throughout the software development process in prescriptive ways. Although these prescriptive models are important during system implementation, descriptive models derived from runtime data offer valuable information in later phases of the system life cycle. Unfortunately, such descriptive models are only marginally explored in the field of MDE. Current MDE approaches mostly neglect the possibility to describe an existing and operating system using the information upstream from operations to design. To create a link between prescriptive and descriptive models, we propose a unifying framework for a combined but loosely-coupled usage of MDE approaches and process mining (PM) techniques. This framework embodies the execution-based model profiling as a continuous process to improve prescriptive models at design-time through runtime information. We provide an evaluation case study in order to demonstrate the feasibility and benefits of the introduced approach. In this case study we implement a prototype of our framework to register logs from a running system. The implemented prototype transforms the registered logs into XES-format for further processing and analysis via PM algorithms. We prove that the resulting model profiles are sufficient enough for runtime verification. Furthermore, we demonstrate the possibility to maintain model profiles for multiple concerns, such as functionality, performance and components interrelations, through the unifying framework.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
2019 |
@mastersthesis{peherstorfer2019,
  title     = {{BIM} and blockchain; a decentralized solution for a change management workflow in construction projects},
  author    = {Peherstorfer, David},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: Univ.-Ass. DI Galina Paskaleva. Handle: http://hdl.handle.net/20.500.12708/13791},
  url       = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-125414},
  year      = {2019},
  date      = {2019-04-10},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {There is a big potential for process optimizations, due to the digitalization gap in the construction business. New digital technologies, as the Building Information Modelling (BIM), are increasingly being adapted by the stakeholders in this area. On the other hand, blockchain is a very new and innovative technology domain which has grown immensely in the last several years, and where people are now trying to find the right use-cases. Especially, the even newer field of smart contract development has opened the door for a large amount of possible applications, where it is neither clear if these can actually be implemented as envisioned, nor if there is even a need for a decentralized solution at all. In a construction project, changes on BIM models are only to be approved by the appropriate stakeholder. Therefore, we have combined the BIM models, which are stored using a Git repository, with a release management workflow, which is realised as a smart contract on the Ethereum blockchain. This enables the workflow to be transparent, traceable and its results to be immutable. The goal of this work is to create a prototype and compare it to other (off-chain) solutions and to evaluate if an application of a combination of BIM and blockchain yields an advantage in terms of costs and security.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
@mastersthesis{eigner2019,
  title     = {From Modeling Languages to Query Languages: A Generative Approach},
  author    = {Eigner, Alexander},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer. Handle: http://hdl.handle.net/20.500.12708/13795},
  url       = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-125136},
  year      = {2019},
  date      = {2019-03-21},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {The utilization of models and approaches, such as MDE, in engineering fields grows in popularity, because models provide useful means for the data-exchange, testing, validation and code generation. Domain experts prefer to use DSMLs over GPMLs. GPMLs can be hard to understand, since they require a sufficient education in computer science-related fields and may not be easily applicable for the modeling of domain-specific artefacts. In contrast, DSMLs are tailored towards particular domains and can thus be easily understood and applied by domain-experts. In the process of the ongoing digitalization models constantly grow in size and complexity. Thus, the need for querying models, which are usually created with DSMLs, grows as well, whereat model querying is not only important for information retrieval, but can also provide powerful means for the testing and validation of large systems. Although many well-established model querying approaches already exist, they are usually meant to be used by IT-experts and ignore the need of users from utterly different engineering fields for easy-to-use query languages, who lack the necessary IT know-how. Besides that, users, who lack the knowledge about the DSML's metamodels, may run into metamodel-related pitfalls. In order to meet these needs, an EMF-based prototype has been developed in the course of this thesis based on Wieringa's Engineering Cycle, that generates the model querying language MQL automatically from the Ecore metamodel of a chosen DSML and provides means for the specification and execution of MQL queries. This approach should provide query languages that resemble the original DSMLs as much as possible, by reusing and modifying the DSML's original elements. This prototype also generates an XText grammar specification that will be used for writing MQL queries. A model-to-text generator translates MQL queries into equivalent Viatra VQL queries, which are then executed by the MQL's Viatra-based query engine. Hence, MQL firstly tries to be easily usable by domain experts and secondly helps users, who lack knowledge about the DSML's metamodel, by ``guiding'' them through the DSML's syntactical features. A literature survey has yielded only one related work that can be considered as relatively comparable to the approach of this thesis. This result emphasizes the novelty of this approach and the relatively little amount of attention that has been paid to the addressed domain expert's needs so far. The MQL prototype has been evaluated in terms of query execution time and usability against Viatra VQL. The evaluation of the execution times shows, that MQL's Viatra VQL code generator needs to be optimized, in order to allow MQL to benefit from the rapidness of its underlying Viatra VQL query engine. Furthermore, MQL achieved higher scores in the Usability evaluation than Viatra VQL regarding the effectiveness, efficiency and satisfiability.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
2018 |
@mastersthesis{kletzander2018,
  title     = {A Kernel Language based Exchange Framework for Behavioural Modelling Languages},
  author    = {Kletzander, Christian},
  note      = {Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer. Handle: http://hdl.handle.net/20.500.12708/7745},
  url       = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-115837},
  year      = {2018},
  date      = {2018-08-31},
  address   = {A-1040 Wien, Karlsplatz 13},
  school    = {TU Wien, Fakultät für Informatik},
  abstract  = {The interoperability for exchanging behavioural models between different tools in automation is only achieved by a small amount, which are supporting standardized import and export formats. There is no transformation framework existing for exchanging different behavioural models through a standardized kernel language. The literature describes several techniques to transform a modelling language into another pre-defined modelling language, but all of these are fixed to specific types of modelling languages and do not support the general exchange between any behavioural modelling language. In this thesis, I introduce a new technique that allows exchanging a small amount of behavioural modelling languages through a standardized kernel language based exchange framework. I am using the Intermediate Modelling Layer (IML) from the AutomationML consortium as a kernel language for exchanging activity-on-node networks (AONN) into GANTT charts and back. By doing a case study based evaluation the generated input and output models of the different behaviour modelling types are analysed for possible information loss after exchanging them. The round trip transformation from GANTT to AONN and back has no information loss, whereas AONN to GANTT and back loses information attributes like delay, latest start time point, earliest start time point and latest end time point.},
  pubstate  = {published},
  tppubtype = {mastersthesis}
}
Dopplinger Marc Advisor: O.Univ.-Prof. DI Mag. Dr. Gerti Kappel, Co-Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer Supporting Model Extensions in RubyTL Masters Thesis TU Wien, Fakultät für Informatik, 2018. @mastersthesis{dopplinger2018, title = {Supporting Model Extensions in RubyTL}, author = {Dopplinger Marc, Advisor: O.Univ.-Prof. DI Mag. Dr. Gerti Kappel, Co-Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-115693 http://hdl.handle.net/20.500.12708/5441}, year = {2018}, date = {2018-08-23}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {Model Engineering gets more important in software development because of the increasing use of models. At the same it is important the adapt and extend existing models. But this is sometimes not possible. For example the model was developed from somebody else or the model is used in another project and it is necessary to keep the compatibility. Therefore several lightweight extension mechanism have been developed. For example UML profile for UML diagrams or EMF profiles for standard diagrams of the Eclipse Modeling Framework (EMF). They allow to extend an already existing model without changing the original one. But unfortunately they have some drawbacks. Only a few transformation languages have a support for lightweight extensions and if they do only very basic. ATL can only access the profile with the underlying Java API. With RubyTL it is not possible to process profiles at all. This thesis covers the development of an extension which enables RubyTL to process EMF and UML profiles. Thereby should the extension be not integrated into the RubyTL code. This will be done with model processors. They integrate the profile into the existing model. Due to the circumstance that the profile is now a complete part of the diagram it is possible that the transformation language can access the stereotypes. 
Furthermore should it be possible to use the model processors for other transformation languages, like ATL. The goal is to enable the use of UML and EMF profiles also for other transformation languages. But they do not get integrated into the language. The model processors are used from a command line interface (CLI). The feasibility of the approach is demonstrated by using transformations to apply and read profile information in RubyTL and ATL. The resulting ATL transformations are also compared with ATL transformations using the basic ATL support based on the Java API for UML.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } Model Engineering gets more important in software development because of the increasing use of models. At the same it is important the adapt and extend existing models. But this is sometimes not possible. For example the model was developed from somebody else or the model is used in another project and it is necessary to keep the compatibility. Therefore several lightweight extension mechanism have been developed. For example UML profile for UML diagrams or EMF profiles for standard diagrams of the Eclipse Modeling Framework (EMF). They allow to extend an already existing model without changing the original one. But unfortunately they have some drawbacks. Only a few transformation languages have a support for lightweight extensions and if they do only very basic. ATL can only access the profile with the underlying Java API. With RubyTL it is not possible to process profiles at all. This thesis covers the development of an extension which enables RubyTL to process EMF and UML profiles. Thereby should the extension be not integrated into the RubyTL code. This will be done with model processors. They integrate the profile into the existing model. Due to the circumstance that the profile is now a complete part of the diagram it is possible that the transformation language can access the stereotypes. 
Furthermore should it be possible to use the model processors for other transformation languages, like ATL. The goal is to enable the use of UML and EMF profiles also for other transformation languages. But they do not get integrated into the language. The model processors are used from a command line interface (CLI). The feasibility of the approach is demonstrated by using transformations to apply and read profile information in RubyTL and ATL. The resulting ATL transformations are also compared with ATL transformations using the basic ATL support based on the Java API for UML. |
Proyer Clemens Advisor: Ao.Univ.Prof. Mag. Dr. Christian Huemer, Co-Advisor: DI Mag. Dr. Alexandra Mazak-Huemer Transfer monitoring from University to Industry Masters Thesis TU Wien, Fakultät für Informatik, 2018. @mastersthesis{proyer2018, title = {Transfer monitoring from University to Industry}, author = {Proyer Clemens, Advisor: Ao.Univ.Prof. Mag. Dr. Christian Huemer, Co-Advisor: DI Mag. Dr. Alexandra Mazak-Huemer}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-115769 http://hdl.handle.net/20.500.12708/5423}, year = {2018}, date = {2018-08-20}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {The measurement of the knowledge change of employees as well as the transfer is discussed in this thesis. Although these two terms are often used synonymously, there is a difference between them. Learning is adapting to a situation whereas transfer is applying the knowledge to similar situations. There are many approaches to measuring learning success or transfer, most of which originate in educational science. In this thesis we consider the special case of innovation courses, where there are further requirements that must be met. Unfortunately, the existing frameworks are not designed for these requirements and are therefore not sufficient. An innovation course is a long-term course in which employees of companies are taught in a certain topic. Such an innovation course consists of several modules for which both the measurement of learning success and knowledge transfer for the participants must take place. To achieve this and to make the measurements repeatable and objective, we have developed a framework. We use the Design Science Approach to develop the framework. However, the goal is not to create a static artefact that can only be applied to the course of our case study, but to design a framework that is also easily adaptable and applicable in other innovation courses or in a similar environment. 
To test and improve the framework, we use it in four modules of the DigiTrans 4.0 innovation course. For three of the four modules of our case study, the difference between the knowledge before the module and at the end is statistically significant. We also create linear models to explain or predict the transfer. The models are created with and without heteroscedasticity adjustment. The results of the models are slightly different, but show a common trend, which originates from the same background formula. Since these characteristics are known in the literature of knowledge transfer, the framework created is well suited for measuring the transfer.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } The measurement of the knowledge change of employees as well as the transfer is discussed in this thesis. Although these two terms are often used synonymously, there is a difference between them. Learning is adapting to a situation whereas transfer is applying the knowledge to similar situations. There are many approaches to measuring learning success or transfer, most of which originate in educational science. In this thesis we consider the special case of innovation courses, where there are further requirements that must be met. Unfortunately, the existing frameworks are not designed for these requirements and are therefore not sufficient. An innovation course is a long-term course in which employees of companies are taught in a certain topic. Such an innovation course consists of several modules for which both the measurement of learning success and knowledge transfer for the participants must take place. To achieve this and to make the measurements repeatable and objective, we have developed a framework. We use the Design Science Approach to develop the framework. 
However, the goal is not to create a static artefact that can only be applied to the course of our case study, but to design a framework that is also easily adaptable and applicable in other innovation courses or in a similar environment. To test and improve the framework, we use it in four modules of the DigiTrans 4.0 innovation course. For three of the four modules of our case study, the difference between the knowledge before the module and at the end is statistically significant. We also create linear models to explain or predict the transfer. The models are created with and without heteroscedasticity adjustment. The results of the models are slightly different, but show a common trend, which originates from the same background formula. Since these characteristics are known in the literature of knowledge transfer, the framework created is well suited for measuring the transfer. |
Plettenberg Rudolf Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: DI Mag. Dr. Alexandra Mazak-Huemer Frameworks for Distributed Big Data Processing: A Comparison in the Domain of Predictive Maintenance Masters Thesis TU Wien, Fakultät für Informatik, 2018. @mastersthesis{plettenberg2018, title = {Frameworks for Distributed Big Data Processing: A Comparison in the Domain of Predictive Maintenance}, author = {Plettenberg Rudolf, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer, Co-Advisor: DI Mag. Dr. Alexandra Mazak-Huemer}, editor = {Alex}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-116011 http://hdl.handle.net/20.500.12708/5431}, year = {2018}, date = {2018-04-16}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {Predictive maintenance is a novel approach for making maintenance decisions, lowering maintenance costs, increasing a plants capacity and production volume, and positively affecting environmental and employee safety. In predictive maintenance, condition data of machines is constantly collected and analysed to predict future machine failures. Due to the high volume, velocity, and variety of gathered data, Big Data analytic frameworks are necessary to provide the desired results. The performance of these frameworks highly influences the overall performance of a predictive maintenance system, raising the need for tools to measure it. Benchmarks present such tools by defining general workloads for a system to measure its performance. Due to the wide popularity of Big Data analytics across industries, benchmarks for Big Data analytic frameworks are defined specifically for each domain. While there are currently many benchmarks available for other domains such as retail, social network, or search engines, there are none available for Big Data analytic frameworks in the application area of predictive maintenance. This thesis introduces the predictive maintenance benchmark (PMB). 
The PMB is a benchmark aimed at measuring the performance of Big Data analytic frameworks in the field of predictive maintenance. The data model and workload of the PMB represent typical tasks encountered by a predictive maintenance system. The PMB is implemented in the two most popular Big Data analytic ecosystems Hadoop and Spark and show Spark outperforming Hadoop in almost every task. For evaluation, findings gathered during implementation and execution of the PMB are analysed. Furthermore, the PMB results are validated against other studies comparing Hadoop and Spark.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } Predictive maintenance is a novel approach for making maintenance decisions, lowering maintenance costs, increasing a plants capacity and production volume, and positively affecting environmental and employee safety. In predictive maintenance, condition data of machines is constantly collected and analysed to predict future machine failures. Due to the high volume, velocity, and variety of gathered data, Big Data analytic frameworks are necessary to provide the desired results. The performance of these frameworks highly influences the overall performance of a predictive maintenance system, raising the need for tools to measure it. Benchmarks present such tools by defining general workloads for a system to measure its performance. Due to the wide popularity of Big Data analytics across industries, benchmarks for Big Data analytic frameworks are defined specifically for each domain. While there are currently many benchmarks available for other domains such as retail, social network, or search engines, there are none available for Big Data analytic frameworks in the application area of predictive maintenance. This thesis introduces the predictive maintenance benchmark (PMB). The PMB is a benchmark aimed at measuring the performance of Big Data analytic frameworks in the field of predictive maintenance. 
The data model and workload of the PMB represent typical tasks encountered by a predictive maintenance system. The PMB is implemented in the two most popular Big Data analytic ecosystems Hadoop and Spark and show Spark outperforming Hadoop in almost every task. For evaluation, findings gathered during implementation and execution of the PMB are analysed. Furthermore, the PMB results are validated against other studies comparing Hadoop and Spark. |
Detamble Christian, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer An Interactive Modeling Editor for QVT Relations Masters Thesis TU Wien, Fakultät für Informatik, 2018. @mastersthesis{detamble2018, title = {An Interactive Modeling Editor for QVT Relations}, author = {Detamble Christian, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-109728 http://hdl.handle.net/20.500.12708/6109}, year = {2018}, date = {2018-03-27}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {Since its publication in 2008, Query/View/Transformation-Relations (QVTr) claims to be the standard model transformation language (MTL) for the declarative specification of model transformations, and has been used as an enabling formalism. In consideration of productivity being a central goal in MDE, it is vital for tools and editors to maximize the usability of their implementing MTL. However, taking into account the current state of the art in tools for QVTr, several shortcomings are revealed. First, the availability of matured tools is sparse, and furthermore, they have been developed with the goal to enable the underlying technology. Their design is not user-centered and, in particular, they lack from a poor level of automation and interactivity. In addition, we identify a lack of support for short feedback cycles, which significantly influences the usability of both the editor and implementing MTL. Finally, we consider the neglection of QVTr's concrete, graphical syntax in state of the art editors as unused potential for an increase in readability and traceability. In the context of this thesis, we shed light on the impact of an increase in interactivity, automation, readability, traceability, the usage of QVTr's graphical syntax, and of short feedback cycles on the usability of QVTr. 
For this purpose, we propose a theoretical concept comprising techniques to push the modeling process towards a user-centered approach. The underlying key principles of our concept comprise the so called outward modeling style, a suggestion-driven process, interactive graphical model visualizations and the enforcement of conventions. To show the feasibility of our approach, we conduct user experiments in an industrial context at the LieberLieber Software GmbH company in Vienna, using a prototypical implementation.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } Since its publication in 2008, Query/View/Transformation-Relations (QVTr) claims to be the standard model transformation language (MTL) for the declarative specification of model transformations, and has been used as an enabling formalism. In consideration of productivity being a central goal in MDE, it is vital for tools and editors to maximize the usability of their implementing MTL. However, taking into account the current state of the art in tools for QVTr, several shortcomings are revealed. First, the availability of matured tools is sparse, and furthermore, they have been developed with the goal to enable the underlying technology. Their design is not user-centered and, in particular, they lack from a poor level of automation and interactivity. In addition, we identify a lack of support for short feedback cycles, which significantly influences the usability of both the editor and implementing MTL. Finally, we consider the neglection of QVTr's concrete, graphical syntax in state of the art editors as unused potential for an increase in readability and traceability. In the context of this thesis, we shed light on the impact of an increase in interactivity, automation, readability, traceability, the usage of QVTr's graphical syntax, and of short feedback cycles on the usability of QVTr. 
For this purpose, we propose a theoretical concept comprising techniques to push the modeling process towards a user-centered approach. The underlying key principles of our concept comprise the so called outward modeling style, a suggestion-driven process, interactive graphical model visualizations and the enforcement of conventions. To show the feasibility of our approach, we conduct user experiments in an industrial context at the LieberLieber Software GmbH company in Vienna, using a prototypical implementation. |
2017 |
Weghofer Stefan, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer Moola - A Groovy-based Model Operation Orchestration Language Masters Thesis TU Wien, Fakultät für Informatik, 2017. @mastersthesis{weghofer2017, title = {Moola - A Groovy-based Model Operation Orchestration Language}, author = {Weghofer Stefan, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer}, editor = {Manuel}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-112866 http://hdl.handle.net/20.500.12708/3416}, year = {2017}, date = {2017-09-13}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {A fundamental part of Model-Driven Engineering (MDE) is the use of models and operations. Models represent information of a target system on varying levels of abstraction, while operations allow performing actions on one or more models, including model validation, model transformation, model merging, etc. In recent years, more and more such operations and languages to describe them were introduced to allow MDE to be applied to a wide spectrum of use cases. Today, many advanced scenarios can be expressed by MDE and the use of new operation languages. In every non-trivial project, multiple operations have to be executed in particular order to yield the final result. To orchestrate operations to so-called operation chain, tools and languages have been developed and included to development environments that help in defining complex operation chains and executing them whenever input models change. In this thesis, existing tools and languages for model operation orchestration are analyzed and compared against each other. Inspiration is taken from these tools and other domains, such as Build Management and Workflow Management, to create a new tool for describing operation chains, called Moola. Based on a feature list derived from real-life use cases, Moola is designed and later implemented as domain-specific language (DSL) on top of Groovy. 
Finally, Moola is evaluated against use cases taken from the ARTIST project.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } A fundamental part of Model-Driven Engineering (MDE) is the use of models and operations. Models represent information of a target system on varying levels of abstraction, while operations allow performing actions on one or more models, including model validation, model transformation, model merging, etc. In recent years, more and more such operations and languages to describe them were introduced to allow MDE to be applied to a wide spectrum of use cases. Today, many advanced scenarios can be expressed by MDE and the use of new operation languages. In every non-trivial project, multiple operations have to be executed in particular order to yield the final result. To orchestrate operations to so-called operation chain, tools and languages have been developed and included to development environments that help in defining complex operation chains and executing them whenever input models change. In this thesis, existing tools and languages for model operation orchestration are analyzed and compared against each other. Inspiration is taken from these tools and other domains, such as Build Management and Workflow Management, to create a new tool for describing operation chains, called Moola. Based on a feature list derived from real-life use cases, Moola is designed and later implemented as domain-specific language (DSL) on top of Groovy. Finally, Moola is evaluated against use cases taken from the ARTIST project. |
Wiesenhofer Christian, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer Constraints and Models@Runtime for EMF Profiles Masters Thesis TU Wien, Fakultät für Informatik, 2017. @mastersthesis{wiesenhofer2017, title = {Constraints and Models@Runtime for EMF Profiles}, author = {Wiesenhofer Christian, Advisor: Univ.-Prof. Mag. Dr. Manuel Wimmer}, url = {https://resolver.obvsg.at/urn:nbn:at:at-ubtuw:1-98941 http://hdl.handle.net/20.500.12708/5123}, year = {2017}, date = {2017-04-19}, address = {A-1040 Wien, Karlsplatz 13}, school = {TU Wien, Fakultät für Informatik}, abstract = {Modeling languages play an essential part in the software engineering process. Currently, mostly UML is used for that purpose, but domain-specific modeling languages (DSMLs) get more and more attention. Their main benefit is a higher abstraction-level, which eases generating code from such models. One major drawback of DSMLs, is their time-consuming development. To tackle this problem the EMF Profiles project was founded. It provides a lightweight extension mechanism, just as UML profiles, to be used for DSMLs. This way models can be altered without modifying their whole metamodel and domain properties can be reused, thus reducing the required development time. In comparison to pure metamodel-based languages there are certain limitations in EMF Profiles. There is no way to model constraints regarding the restricted use of stereotypes or to include runtime behavior. A typical use case is for example to use multiple languages at once. However, considering these shortcomings, such an attempt is not possible. Thus the question emerged, how these features can be realized. In this thesis two extensions to EMF Profiles are presented and implemented as prototype, which is then evaluated using a case study. The research problems were solved by introducing an OCL constraint mechanism, which manages the stereotype application. 
Furthermore a generator was implemented to add AspectJ-based code fragments to profiles, so they can influence the runtime behavior of a model element. The case study was conducted by creating a base Petri net language and adding three Petri net extensions, implemented as EMF profiles, to it. All of their specifications could be fully implemented. Further metrics about the approach and the prototype were collected, in order to ensure it is assessable and comparable.}, keywords = {}, pubstate = {published}, tppubtype = {mastersthesis} } Modeling languages play an essential part in the software engineering process. Currently, mostly UML is used for that purpose, but domain-specific modeling languages (DSMLs) get more and more attention. Their main benefit is a higher abstraction-level, which eases generating code from such models. One major drawback of DSMLs, is their time-consuming development. To tackle this problem the EMF Profiles project was founded. It provides a lightweight extension mechanism, just as UML profiles, to be used for DSMLs. This way models can be altered without modifying their whole metamodel and domain properties can be reused, thus reducing the required development time. In comparison to pure metamodel-based languages there are certain limitations in EMF Profiles. There is no way to model constraints regarding the restricted use of stereotypes or to include runtime behavior. A typical use case is for example to use multiple languages at once. However, considering these shortcomings, such an attempt is not possible. Thus the question emerged, how these features can be realized. In this thesis two extensions to EMF Profiles are presented and implemented as prototype, which is then evaluated using a case study. The research problems were solved by introducing an OCL constraint mechanism, which manages the stereotype application. 
Furthermore a generator was implemented to add AspectJ-based code fragments to profiles, so they can influence the runtime behavior of a model element. The case study was conducted by creating a base Petri net language and adding three Petri net extensions, implemented as EMF profiles, to it. All of their specifications could be fully implemented. Further metrics about the approach and the prototype were collected, in order to ensure it is assessable and comparable. |
Novak Petr, Advisor: Dr. Radek Sindelar Design and Integration of Simulation Models for Industrial Systems PhD Thesis Czech Technical University in Prague, Faculty of Electrical Engineering, 2017. @phdthesis{novak2017, title = {Design and Integration of Simulation Models for Industrial Systems}, author = {Novak Petr, Advisor: Dr. Radek Sindelar}, url = {http://hdl.handle.net/10467/65523 https://dspace.cvut.cz/bitstream/handle/10467/65523/Disertace_Novak_Petr_2016.pdf?sequence=1&isAllowed=y}, year = {2017}, date = {2017-03-09}, address = {České vysoké učení technické v Praze, Technická 1902/2, 166 27 Praha 6 - Dejvice-Praha 6, Czechia}, school = {Czech Technical University in Prague, Faculty of Electrical Engineering}, abstract = {Industrial systems are becoming complex and large-scale. Optimization of their operation and testing of their control systems are done on simulation models frequently, because simulated experiments are faster, cheaper, and repeatable compared to experiments done on real industrial plants. However, design and re-design of simulation models are difficult and time-consuming tasks. In addition, integration of simulation models within industrial automation systems is not satisfactory nowadays. This thesis is aimed at improving the design and integration phases of the simulation model life-cycle. In the area of the simulation model design, especially a component-based approach for simulation model creation is investigated and improved in this thesis. It assumes that engineering systems consist of atomic components that are connected into topologies of real industrial plants. The proposed method supports assembling simulation models from simulation components, which can be reused from previous simulation projects. Each real device can be simulated by one of the available implementations of the component, representing this device. 
The proposed solution is based on the utilization of the bond-graph theory to guarantee the compatibility of the interfaces of the connected component implementations and to support their selection. In addition, the bond-graph theory is used to support splitting a simulation model into a set of simulation modules and their integration into a simulation workflow. For all of these types of tasks, the bond-graph theory was enhanced with an explicit description of component interfaces and a new causality assignment algorithm was designed. This algorithm can be used not only for generation of simulation models, but also for verifications on a conceptual planning level, whether specific sets of simulation component implementations are sufficient to model particular plants. In the area of the simulation model integration, two research threads are followed. The first one is related to formalizing, capturing, and integrating knowledge about the real industrial plant, input and output tags, parameters of devices, and mappings of all these entities to simulation model components, variables, and parameters. Such engineering knowledge is used to support simulation model design and maintenance of existing simulation models when a real plant is changed. The second thread in the integration area is focused on interoperability of simulation modules on the level of the supervisory control and data acquisition of the automation pyramid. This task covers the access of simulations to runtime data, improved parameter setting, and version-control of simulation modules. This thesis contributes to the areas of the simulation modeling, knowledge representation, and distributed system integration. 
The most important results are (i) adaptation of the bond graph theory for non-traditional applications including selection of explicitly specified component implementations as well as a new causality assignment algorithm supporting this approach, (ii) utilization of ontologies for supporting simulation model design and integration, and (iii) improved simulation model integration.}, keywords = {}, pubstate = {published}, tppubtype = {phdthesis} } Industrial systems are becoming complex and large-scale. Optimization of their operation and testing of their control systems are done on simulation models frequently, because simulated experiments are faster, cheaper, and repeatable compared to experiments done on real industrial plants. However, design and re-design of simulation models are difficult and time-consuming tasks. In addition, integration of simulation models within industrial automation systems is not satisfactory nowadays. This thesis is aimed at improving the design and integration phases of the simulation model life-cycle. In the area of the simulation model design, especially a component-based approach for simulation model creation is investigated and improved in this thesis. It assumes that engineering systems consist of atomic components that are connected into topologies of real industrial plants. The proposed method supports assembling simulation models from simulation components, which can be reused from previous simulation projects. Each real device can be simulated by one of the available implementations of the component, representing this device. The proposed solution is based on the utilization of the bond-graph theory to guarantee the compatibility of the interfaces of the connected component implementations and to support their selection. In addition, the bond-graph theory is used to support splitting a simulation model into a set of simulation modules and their integration into a simulation workflow. 
For all of these types of tasks, the bond-graph theory was enhanced with an explicit description of component interfaces and a new causality assignment algorithm was designed. This algorithm can be used not only for generation of simulation models, but also for verifications on a conceptual planning level, whether specific sets of simulation component implementations are sufficient to model particular plants. In the area of the simulation model integration, two research threads are followed. The first one is related to formalizing, capturing, and integrating knowledge about the real industrial plant, input and output tags, parameters of devices, and mappings of all these entities to simulation model components, variables, and parameters. Such engineering knowledge is used to support simulation model design and maintenance of existing simulation models when a real plant is changed. The second thread in the integration area is focused on interoperability of simulation modules on the level of the supervisory control and data acquisition of the automation pyramid. This task covers the access of simulations to runtime data, improved parameter setting, and version-control of simulation modules. This thesis contributes to the areas of the simulation modeling, knowledge representation, and distributed system integration. The most important results are (i) adaptation of the bond graph theory for non-traditional applications including selection of explicitly specified component implementations as well as a new causality assignment algorithm supporting this approach, (ii) utilization of ontologies for supporting simulation model design and integration, and (iii) improved simulation model integration. |
Other
2021 |
Winkler, Dietmar; Biffl, Stefan; Mendez, Daniel; Wimmer, Manuel; Bergsmann, Johannes (Ed.) Software Quality: Future Perspectives on Software Engineering Quality Proceeding Springer, 404 , 2021, ISBN: 978-3-030-65853-3. @proceedings{DBLP:conf/swqd/2021, title = {Software Quality: Future Perspectives on Software Engineering Quality}, editor = {Dietmar Winkler and Stefan Biffl and Daniel Mendez and Manuel Wimmer and Johannes Bergsmann}, doi = {10.1007/978-3-030-65854-0}, isbn = {978-3-030-65853-3}, year = {2021}, date = {2021-01-27}, volume = {404}, publisher = {Springer}, series = {LNBIP}, abstract = {This book constitutes the refereed proceedings of the 13th Software Quality Days Conference, SWQD 2021, which was planned to be held in Vienna, Austria, during January 19–21, 2021. Due to the COVID-19 pandemic, the conference was cancelled and will be merged with SWQD 2022. The Software Quality Days (SWQD) conference started in 2009 and has grown to the biggest conference on software quality in Europe with a strong community. The program of the SWQD conference is designed to encompass a stimulating mixture of practical presentations and new research topics in scientific presentations. The guiding conference topic of the SWQD 2021 is “Future Perspectives on Software Engineering Quality”. The 3 full papers and 5 short papers presented in this volume were carefully reviewed and selected from 13 submissions. The volume also contains 2 invited talks and one introductory paper for an interactive session. 
The contributions were organized in topical sections named: automation in software engineering; quality assurance for AI-based systems; machine learning applications; industry-academia collaboration; and experimentation in software engineering.}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } This book constitutes the refereed proceedings of the 13th Software Quality Days Conference, SWQD 2021, which was planned to be held in Vienna, Austria, during January 19–21, 2021. Due to the COVID-19 pandemic, the conference was cancelled and will be merged with SWQD 2022. The Software Quality Days (SWQD) conference started in 2009 and has grown to the biggest conference on software quality in Europe with a strong community. The program of the SWQD conference is designed to encompass a stimulating mixture of practical presentations and new research topics in scientific presentations. The guiding conference topic of the SWQD 2021 is “Future Perspectives on Software Engineering Quality”. The 3 full papers and 5 short papers presented in this volume were carefully reviewed and selected from 13 submissions. The volume also contains 2 invited talks and one introductory paper for an interactive session. The contributions were organized in topical sections named: automation in software engineering; quality assurance for AI-based systems; machine learning applications; industry-academia collaboration; and experimentation in software engineering. |
2020 |
Wally, Bernhard ISA-95 und AutomationML für die Modellierung von Produktionsanlagen und -prozessen Workshop Workshop der PLIM Gruppe des Prostep IVIP Vereins, 2020. @workshop{wally2020c, title = {ISA-95 und AutomationML für die Modellierung von Produktionsanlagen und -prozessen}, author = {Bernhard Wally}, year = {2020}, date = {2020-04-23}, booktitle = {Workshop der PLIM Gruppe des Prostep IVIP Vereins}, journal = {Workshop der PLIM Gruppe des Prostep IVIP Vereins}, abstract = {Hr. Wally stellt Grundlagen von ISA-95 und das Zusammenspiel von ISA-95 und AutomationML vor Diskussion/Kommentare • Spontan ergibt sich die Einschätzung, dass alles aus ISA mit AML abgebildet werden kann • Es müsste nicht aller Inhalt in einem einzigen AML file abgebildet sein, die Information ist auch in Bibliotheken auslagerbar/zerlegbar. • Aktuelles Ziel ist eine Implementierung mit AML • Die Audi und ZF Abläufe sollen in AML modelliert werden • Typische Herausforderung bei mehreren Daten Quellen/-Senken ist die eindeutige Beschreibung der Objekte.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } Hr. Wally stellt Grundlagen von ISA-95 und das Zusammenspiel von ISA-95 und AutomationML vor Diskussion/Kommentare • Spontan ergibt sich die Einschätzung, dass alles aus ISA mit AML abgebildet werden kann • Es müsste nicht aller Inhalt in einem einzigen AML file abgebildet sein, die Information ist auch in Bibliotheken auslagerbar/zerlegbar. • Aktuelles Ziel ist eine Implementierung mit AML • Die Audi und ZF Abläufe sollen in AML modelliert werden • Typische Herausforderung bei mehreren Daten Quellen/-Senken ist die eindeutige Beschreibung der Objekte. |
2019 |
Wimmer, Manuel Industrial robot meets digital twin Miscellaneous 2019. @misc{Wimmer2019Standard, title = {Industrial robot meets digital twin}, author = {Manuel Wimmer}, url = {https://derstandard.at/2000103109784/Industrieroboter-trifft-digitalen-Zwilling}, year = {2019}, date = {2019-05-19}, abstract = {A platform for intelligent industrial production is created at the Kepler University Linz. The aim is to create a virtual model. The digital twin describes one of the fundamental visions behind a digitized production of the future. The physical world is doubled in a virtual counter-world.}, keywords = {}, pubstate = {published}, tppubtype = {misc} } A platform for intelligent industrial production is created at the Kepler University Linz. The aim is to create a virtual model. The digital twin describes one of the fundamental visions behind a digitized production of the future. The physical world is doubled in a virtual counter-world. |
Wimmer, Manuel Software Evolution in Time and Space: Unifying Version and Variability Management Workshop Dagstuhl Seminar 19191, 2019. @workshop{Wimmer2019se, title = {Software Evolution in Time and Space: Unifying Version and Variability Management}, author = {Manuel Wimmer}, url = {https://www.dagstuhl.de/en/program/calendar/semhp/?semnr=19191}, year = {2019}, date = {2019-05-10}, booktitle = {Dagstuhl Seminar 19191}, abstract = {Modern software systems evolve rapidly and often need to exist in many variants. Consider the Linux kernel with its uncountable number of variants. Each variant addresses different requirements, such as runtime environments ranging from Android phones to large super-computers and server farms. At the same time, the Linux kernel frequently boasts new versions, managed by thousands of developers. Yet, software versions – resulting from evolution in time – and variants – resulting from evolution in space – are managed radically differently. Version management relies on a version control system (Git) and sophisticated workflows – concepts that have been developed for decades in the field of software configuration management (SCM).}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } Modern software systems evolve rapidly and often need to exist in many variants. Consider the Linux kernel with its uncountable number of variants. Each variant addresses different requirements, such as runtime environments ranging from Android phones to large super-computers and server farms. At the same time, the Linux kernel frequently boasts new versions, managed by thousands of developers. Yet, software versions – resulting from evolution in time – and variants – resulting from evolution in space – are managed radically differently. Version management relies on a version control system (Git) and sophisticated workflows – concepts that have been developed for decades in the field of software configuration management (SCM). |
2018 |
Garrigós, I; Wimmer, Manuel (Ed.) Current Trends in Web Engineering Proceeding Springer, 2018, ISBN: 978-3-319-74432-2. @proceedings{Garrigos2018ctwe, title = {Current Trends in Web Engineering}, editor = {I. Garrigós and Manuel Wimmer}, doi = {10.1007/978-3-319-74433-9}, isbn = {978-3-319-74432-2}, year = {2018}, date = {2018-00-00}, publisher = {Springer}, series = {Current Trends in Web Engineering - ICWE 2017 International Workshops, Liquid Multi-Device Software and EnWoT, practi-O-web, NLPIT, SoWeMine}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } |
Draheim, D; Holmes, T; Wimmer, Manuel Multi-Level Model Transformation Workshop Dagstuhl Seminar 17492, 7 (12), Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik, 2018, ISSN: 2192-5283. @workshop{Draheim2018mlmt, title = {Multi-Level Model Transformation}, author = {D. Draheim and T. Holmes and Manuel Wimmer}, editor = {João Paulo A. Almeida and Ulrich Frank and Thomas Kühne}, url = {http://drops.dagstuhl.de/opus/volltexte/2018/8675/}, doi = {10.4230/DagRep.7.12.18}, issn = {2192-5283}, year = {2018}, date = {2018-00-00}, booktitle = {Dagstuhl Seminar 17492}, volume = {7}, number = {12}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik}, abstract = {This report documents the program and the outcomes of Dagstuhl Seminar 17492 "Multi-Level Modelling". This seminar brought together researchers and industry practitioners from the fields of conceptual modeling, ontologies, and formal foundations to discuss and share the benefits of Multi-Level Modelling (MLM), to develop an agreement on MLM terminology and scope, and to drive future research directions in MLM. Some foundational presentations were given by the seminar organizers to ground the discussions and provide an initial set of open questions which would lead to the formation of the working groups. In addition, six industry representatives gave talks explaining the needs, challenges, utility, and possible issues with adoption of MLM in industry. Based on the original seminar goals, the talks, and the resulting discussions, four working groups were established to investigate: the formal and ontological "Foundations" of MLM; promising "Applications" and potential evaluation criteria for MLM methods; the "Dynamic Aspects" of MLM, such as processes and behaviour; and, the use of and impact on "Model Transformations" in the context of MLM.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } This report documents the program and the outcomes of Dagstuhl Seminar 17492 "Multi-Level Modelling". 
This seminar brought together researchers and industry practitioners from the fields of conceptual modeling, ontologies, and formal foundations to discuss and share the benefits of Multi-Level Modelling (MLM), to develop an agreement on MLM terminology and scope, and to drive future research directions in MLM. Some foundational presentations were given by the seminar organizers to ground the discussions and provide an initial set of open questions which would lead to the formation of the working groups. In addition, six industry representatives gave talks explaining the needs, challenges, utility, and possible issues with adoption of MLM in industry. Based on the original seminar goals, the talks, and the resulting discussions, four working groups were established to investigate: the formal and ontological "Foundations" of MLM; promising "Applications" and potential evaluation criteria for MLM methods; the "Dynamic Aspects" of MLM, such as processes and behaviour; and, the use of and impact on "Model Transformations" in the context of MLM. |