Abstract: | The eXtensible Access Control Markup Language (XACML) has attracted significant attention from both industry and academia, and has become the de facto standard for the specification of access control policies. However, its XML-based verbose syntax and rich set of constructs make the authoring of XACML policies difficult and error-prone. Several automated tools have been proposed to analyze XACML policies before their actual deployment. However, most of the existing tools either cannot efficiently reason about non-Boolean attributes, which often appear in XACML policies, or restrict the analysis to a small set of properties. This work presents a policy analysis framework for the verification of XACML policies based on SAT modulo theories (SMT). We show how XACML policies can be encoded into SMT formulas, along with a query language able to express a variety of well-known security properties, for policy analysis. By being able to reason over non-Boolean attributes, our SMT-based policy analysis framework allows a fine-grained policy analysis while relieving policy authors of the burden of defining an appropriate level of granularity of the analysis. An evaluation of the framework shows that it is computationally efficient and requires less memory compared to existing approaches. |
Bibtex: | @article{THRZ17:CS,
author = {Turkmen, Fatih and den Hartog, Jerry and Ranise, Silvio and Zannone, Nicola}, title = {Formal Analysis of {XACML} Policies using {SMT}}, journal = {Computers \& Security}, volume = {66}, month = may, pages = {185--203}, publisher = {Elsevier Advanced Technology}, doi = {10.1016/j.cose.2017.01.009}, year = {2017}} |
Abstract: | Data leakage is at the heart of most of the privacy breaches worldwide. In this paper we present a white-box approach to detect potential data leakage by spotting anomalies in database transactions. We refer to our solution as white-box because it builds self explanatory profiles that are easy to understand and update, as opposite to black-box systems which create profiles hard to interpret and maintain (e.g., neural networks). In this paper we introduce our approach and we demonstrate that it is a major leap forward w.r.t. previous work on the topic in several aspects: (i) it significantly decreases the number of false positives, which is orders of magnitude lower than in state-of-the-art comparable approaches (we demonstrate this using an experimental dataset consisting of millions of real enterprise transactions); (ii) it creates profiles that are easy to understand and update, and therefore it provides an explanation of the origins of an anomaly; (iii) it allows the introduction of a feedback mechanism that makes possible for the system to improve based on its own mistakes; and (iv) feature aggregation and transaction flow analysis allow the system to detect threats which span over multiple features and multiple transactions. |
Bibtex: | @article{CHPEP16:JISA,
author = {Costante, E. and den Hartog, J. and Petkovi{\'c}, Milan and Etalle, S. and Pechenizkiy, M.}, title = {A white-box anomaly-based framework for database leakage detection}, journal = {Journal of Information Security and Applications}, year = {2016}, publisher = {Elsevier}} |
Abstract: | Cloud storage services have become increasingly popular in recent years. Users are often registered to multiple cloud storage services that suit different needs. However, the ad hoc manner in which data sharing between users is implemented lead to issues for these users. For instance, users are required to define different access control policies for each cloud service that they use and are responsible for synchronizing their policies across different cloud providers. Users do not have access to a uniform and expressive method to deal with authorization. Current authorization solutions cannot be applied as-is, since they cannot cope with challenges specific to cloud environments. In this paper, we analyze the challenges of data sharing in multi-cloud environments and propose SAFAX, an XACML-based authorization service designed to address these challenges. SAFAX’s architecture allows users to deploy their access control policies in a standard format, in a single location, and augment policy evaluation with information from user selectable external trust services. We describe the architecture of SAFAX, a prototype implementation based on this architecture, illustrate the extensibility through external trust services and discuss the benefits of using SAFAX from both the user’s and cloud provider’s perspectives. |
Bibtex: | @article{KEHZ15:FRONTIERS,
title = {{SAFAX}-An Extensible Authorization Service for Cloud Environments}, author = {Kaluvuri, S.P. and Egner, A.I. and Hartog, J.I. den and Zannone, N.}, journal = {Frontiers in ICT}, volume = {2}, pages = {9}, publisher = {Frontiers}, year = 2015} |
Abstract: | Trust is fundamental for promoting the use of online services, such as e-commerce or e-health. Understanding how users perceive trust online is a precondition to create trustworthy marketplaces. In this article, we present a domain-independent general trust perception model that helps us to understand how users make online trust decisions and how we can help them in making the right decisions, which minimize future regret. We also present the results of a user study describing the weight that different factors in the model (e.g., security, look&feel, and privacy) have on perceived trust. The study identifies the existence of a positive correlation between the user's knowledge and the importance placed on factors such as security and privacy. This indicates that the impact factors as security and privacy have on perceived trust is higher in users with higher knowledge. |
Bibtex: | @article{CHP14:CI,
author = {Costante, E. and Hartog, J.I. den and Petkovi{\'c}, M.}, title = {Understanding perceived trust to reduce regret}, journal = {Computational Intelligence}, publisher = {Wiley}, volume = {31}, number = {2}, pages = {327--347}, year = 2015, month = may, doi = {10.1111/coin.12025}} |
Abstract: | abstract not found |
Bibtex: | @article{BEHV11:JAR,
author = {Bauer, L. and Etalle, S. and Hartog, J.I. den and Vigan{\'o}, L.}, year = {2011}, title = {Preface of Special Issue on ``Computer Security: Foundations and Automated Reasoning''}, journal = {Journal of Automated Reasoning}, volume = {46}, number = {3-4}, pages = {223--224}, month = apr} |
Abstract: | In service-oriented systems a constellation of services cooperate, sharing potentially sensitive information and responsibilities. Cooperation is only possible if the different participants trust each other. As trust may depend on many different factors, in a flexible framework for Trust Management (TM) trust must be computed by combining different types of information. In this paper we describe the TAS3 TM framework which integrates independent TM systems into a single trust decision point. The TM framework supports intricate combinations whilst still remaining easily extensible. It also provides a unified trust evaluation interface to the (authorization framework of the) services. We demonstrate the flexibility of the approach by integrating three distinct TM paradigms: reputation-based TM, credential-based TM, and Key Performance Indicator TM. Finally, we discuss privacy concerns in TM systems and the directions to be taken for the definition of a privacy-friendly TM architecture. |
Bibtex: | @article{BEHHTTZ10:JTAER,
number = {2}, month = {August}, official_url = {http://dx.doi.org/10.4067/S0718-18762010000200006}, issn = {0718-1876}, author = {K. {B{\"o}hm} and S. {Etalle} and J. I. {den Hartog} and C. {H{\"u}tter} and S. {Trabelsi} and D. {Trivellato} and N. {Zannone}}, num_pages = {20}, address = {Chile}, publisher = {Universidad de Talca}, id_number = {10.4067/S0718-18762010000200006}, journal = {Journal of Theoretical and Applied Electronic Commerce Research}, howpublished = {http://eprints.eemcs.utwente.nl/19069/}, volume = {5}, title = {A flexible architecture for privacy-aware trust management}, year = {2010}, pages = {77--96}} |
Abstract: | abstract not found |
Bibtex: | @article{Hartog08:SCP,
author = {Hartog, J.I. den}, title = {Towards mechanized correctness proofs for cryptographic algorithms: Axiomatization of a probabilistic Hoare style logic.}, publisher = {Elsevier}, journal = {Science of Computer Programming}, volume = {74}, number = {1-2}, pages = {52--63}, year = {2008}} |
Abstract: | In this paper we introduce a new framework for controlling compliance to discretionary access control policies [Cederquist et al. in Proceedings of the International Workshop on Policies for Distributed Systems and Networks (POLICY), 2005; Corin et al. in Proceedings of the IFIP Workshop on Formal Aspects in Security and Trust (FAST), 2004]. The framework consists of a simple policy language, modeling ownership of data and administrative policies. Users can create documents, and authorize others to process the documents. To control compliance to the document policies, we define a formal audit procedure by which users may be audited and asked to justify that an action was in compliance with a policy. In this paper we focus on the implementation of our framework. We present a formal proof system, which was only informally described in earlier work. We derive an important tractability result (a cut-elimination theorem), and we use this result to implement a proof-finder, a key component in this framework. We argue that in a number of settings, such as collaborative work environments, where a small group of users create and manage document in a decentralized way, our framework is a more flexible approach for controlling the compliance to policies. |
Bibtex: | @article{CCDEHL07:IJIS,
volume = {6}, number = {2-3}, howpublished = {http://eprints.eemcs.utwente.nl/9530/}, author = {J. G. Cederquist and R. J. Corin and M. A. C. Dekker and S. Etalle and J. I. den Hartog and G. Lenzini}, editor = {T. Dimitrakos and F. Martinelli and P. Y. A. Ryan and S. Schneider}, address = {Berlin}, title = {Audit-based compliance control}, publisher = {Springer Verlag}, pages = {133--151}, year = {2007}, journal = {International Journal of Information Security}, official_url = {http://dx.doi.org/10.1007/s10207-007-0017-y}, issn = {1615-5262}, num_pages = {19}, id_number = {10.1007/s10207-007-0017-y}} |
Abstract: | Probability, be it inherent or explicitly introduced, has become an important issue in the verification of many programs. We study a formalism which allows reasoning about programs which can act probabilistically. To describe probabilistic programs a basic programming language with an operator for probabilistic choice is introduced and a denotational semantics is given for this language. To specify properties of probabilistic programs, standard first order logic predicates are insufficient, so a notion of probabilistic predicates are introduced. A Hoare-style proof system to check properties of probabilistic programs is given. The proof system for a sublanguage is shown to be correct and complete; the properties that can be derived are exactly the valid properties. Finally some typical examples illustrate the use of the probabilistic predicates and the proof system. |
Bibtex: | @article{HV02:ijfcs,
author = {Hartog, J.I. den and Vink, E.P. de}, title = {Verifying Probabilistic Programs Using a {H}oare like Logic}, journal = {International Journal of Foundations of Computer Science}, publisher = {World Scientific}, volume = 13, number = 3, pages = {315--340}, year = 2002, official_url = {http://dx.doi.org/10.1142/S012905410200114X}} |
Abstract: | For a process language with action refinement and synchronization both an operational and a denotational semantics are given. The operational semantics is based on an SOS-style transition system specification involving syntactical refinement sequences. The denotational semantics is an interleaving model which uses semantical refinement `environments'. It identifies those statements which are equal under all refinements. The denotational model is shown to be fully abstract with respect to the operational one. The underlying metric machinery is exploited to obtain this full abstractness result. Usually, action refinement is treated either in a model with some form of true concurrency, or, when an interleaving model is applied, by assuming that the refining statements are atomized. We argue that an interleaving model without such atomization is attractive as well. |
Bibtex: | @article{HVB99:fi,
volume = {40}, howpublished = {http://eprints.eemcs.utwente.nl/1017/}, official_url = {http://fi.mimuw.edu.pl/vol40.html}, issn = {0169-2968}, author = {J. I. {den Hartog} and E. P. {de Vink} and J. W. {de Bakker}}, num_pages = {48}, title = {Full Abstractness of a Metric Semantics for Action Refinement}, pages = {335--382}, journal = {Fundamenta Informaticae}, year = {1999}} |
Abstract: | In this paper we present a protocol-aware anomaly detection framework specifically designed for back office networks together with a new automatic method for feature selection that allows to dramatically reduce the false positive rate (FPR) without compromising the detection rate (DR). The system monitors SMB and MS-RPC (the main protocols in back office networks) and takes into consideration specific features of SMB such as the presence of file paths, which are noisy, yet contain information necessary to detect some attacks. As a part of the framework we introduce a new method to cut the FPR by carefully building and selecting the right set of features to be monitored. In back office networks this is a challenging task where manual selection requires carefully exploring the network traffic to choose from numerous potential features. Also features need to be resilient to irregularities in the traffic caused by human involvement. Our framework automates selection utilizing two new metrics to determine the ‘quality’ of a feature: stability, i.e. its robustness to false alarms and granularity, i.e. the relative amount of information contained. Our experiments show a significant improvement in FPR-DR trade-off when our framework is used to select features in detection of network-based exploits and malicious file accesses. |
Bibtex: | @inproceedings{YEH16:ICISS,
title = {Towards Useful Anomaly Detection for Back Office Networks}, author = {Y{\"u}ksel, {\"O}mer and den Hartog, Jerry and Etalle, Sandro}, booktitle = {Proceedings twelfth International Conference on Information Systems Security (ICISS-2016)}, series = {LNCS}, volume = {10063}, pages = {509--520}, year = {2016}, doi = {10.1007/978-3-319-49806-5_30}, publisher = {Springer International Publishing}} |
Abstract: | With the increasing popularity of collaborative systems like social networks, the risk of data misuse has become even more critical for users. As a consequence, there is a growing demand for solutions to properly protect data created and used within these systems. Enabling collaborative specification of permissions, while ensuring an appropriate level of control to the different parties involved, inherently leads to decisions of some users being overruled by the policies of other users. Users need to be aware that this is happening and why, otherwise they may lose trust in the system, which can impact their willingness to collaborate. Enhancing user awareness requires that users know about and understand the conflicts that occurred. In this paper, we propose an approach to compute a justification for a decision in cases where conflicts occur and, based on this, generate feedback that explains users why their decision was not enforced. |
Bibtex: | @inproceedings{HZ16:ICISS,
title = {Collaborative Access Decisions: Why Has My Decision Not Been Enforced?}, author = {Hartog, J.I. den and Zannone, N.}, booktitle = {Proceedings twelfth International Conference on Information Systems Security (ICISS-2016)}, series = {LNCS}, volume = {10063}, pages = {109--130}, year = {2016}, publisher = {Springer International Publishing}, doi = {10.1007/978-3-319-49806-5_6}, isbn = {978-3-319-49806-5}, url = {http://dx.doi.org/10.1007/978-3-319-49806-5_6}} |
Abstract: | Digital data theft is difficult to detect and typically it also takes a long time to discover that data has been stolen. This paper introduces a data-driven approach based on Markov chains to create believable decoy project folders which can assist in detecting potentially ongoing attacks. This can be done by deploying these intrinsically valueless folders between real project folders and by monitoring interactions with them. We present our approach and results from a user study demonstrating the believability of the generated decoy folders. |
Bibtex: | @inproceedings{THP16:DBSEC,
title = {Towards Creating Believable Decoy Project Folders for Detecting Data Theft}, author = {Thaler, Stefan and den Hartog, Jerry and Petkovi{\'c}, Milan}, booktitle = {IFIP Annual Conference on Data and Applications Security and Privacy}, pages = {161--169}, year = {2016}, organization = {Springer International Publishing}} |
Abstract: | As social networks, shared editing platforms and other collaborative systems are becoming increasingly popular, the demands for proper protection of the data created and used within these systems grows. Yet, existing access control mechanisms are not suited for the challenges imposed by collaborative systems. Two main challenges should be addressed: collaborative specification of permissions, while ensuring an appropriate levels of control to the different parties involved, and enabling transparency in decision making in cases where the access requirements of these different parties are in conflict. In this paper we propose a data governance model for collaborative systems, which allows the integration of access requirements specified by different users based on their relation with a data object. We also study the practical feasibility of enabling transparency by comparing different deployment options for transparency in XACML. |
Bibtex: | @inproceedings{MHZ16:DBSEC,
author = {Mahmudlu, Rauf and Hartog, J.I. den and Zannone, N.}, title = {Data Governance \& Transparency for Collaborative Systems}, booktitle = {IFIP Annual Conference on Data and Applications Security and Privacy}, pages = {199--216}, year = 2016, organization = {Springer International Publishing}} |
Abstract: | Data loss, i.e. the unauthorized/unwanted disclosure of data, is a major threat for modern organizations. Data Loss Protection (DLP) solutions in use nowadays, either employ patterns of known attacks (signature-based) or try to find deviations from normal behavior (anomaly-based). While signature-based solutions provide accurate identification of known attacks and, thus, are suitable for the prevention of these attacks, they cannot cope with unknown attacks, nor with attackers who follow unusual paths (like those known only to insiders) to carry out their attack. On the other hand, anomaly-based solutions can find unknown attacks but typically have a high false positive rate, limiting their applicability to the detection of suspicious activities. In this paper, we propose a hybrid DLP framework that combines signature-based and anomaly-based solutions, enabling both detection and prevention. The framework uses an anomaly-based engine that automatically learns a model of normal user behavior, allowing it to flag when insiders carry out anomalous transactions. Typically, anomaly-based solutions stop at this stage. Our framework goes further in that it exploits an operator's feedback on alerts to automatically build and update signatures of attacks that are used to timely block undesired transactions before they can cause any damage. |
Bibtex: | @inproceedings{CEFHZ16:WRIT,
author = {Costante, E. and Etalle, S. and Fauri, D. and Hartog, J.I. den and Zannone, N.}, title = {A Hybrid Framework for Data Loss Prevention and Detection}, booktitle = {Proceedings of the Workshop on Research for Insider Threats (WRIT 2016)}, year = {2016}, pages = {324--333}, organization = {IEEE}, doi = {10.1109/SPW.2016.24}, isbn = {978-1-5090-3690-5}, note = {workshop at IEEE Symposium on Security and Privacy}} |
Abstract: | Detection of previously unknown attacks and malicious messages is a challenging problem faced by modern network intrusion detection systems. Anomaly-based solutions, despite being able to detect unknown attacks, have not been used often in practice due to their high false positive rate, and because they provide little actionable information to the security officer in case of an alert. In this paper we focus on intrusion detection in industrial control systems networks and we propose an innovative, practical and semantics-aware framework for anomaly detection. The network communication model and alerts generated by our framework are user-understandable, making them much easier to manage. At the same time the framework exhibits an excellent tradeoff between detection rate and false positive rate, which we show by comparing it with two existing payload-based anomaly detection methods on several ICS datasets. |
Bibtex: | @inproceedings{YEH16:SAC,
author = {{\"O}mer {Y{\"u}ksel} and Jerry den {Hartog} and Sandro {Etalle}}, title = {Reading between the Fields: Practical, Effective Intrusion Detection for Industrial Control Systems}, booktitle = {Proceedings 31st ACM/SIGAPP Symposium on Applied Computing (SAC 2016)}, volume = {1}, month = apr, address = {New York, NY, USA}, publisher = {ACM}, year = {2016}, num_pages = {8}, pages = {2063--2070}, doi = {10.1145/2851613.2851799}, isbn_13 = {978-1-4503-3739-7}, location = {Pisa, Italy}, event_dates = {April 4-8, 2016}, event_type = {Conference}, keywords = {Industrial control systems, Anomaly detection}, official_url = {http://dx.doi.org/10.1145/2851613.2851799}} |
Abstract: | Recent years have seen an exponential growth of the collection and processing of data from heterogeneous sources for a variety of purposes. Several methods and techniques have been proposed to transform and fuse data into "useful" information. However, the security aspects concerning the fusion of sensitive data are often overlooked. This paper investigates the problem of data fusion and derived data control. In particular, we identify the requirements for regulating the fusion process and eliciting restrictions on the access and usage of derived data. Based on these requirements, we propose an attribute-based policy framework to control the fusion of data from different information sources and under the control of different authorities. The framework comprises two types of policies: access control policies, which define the authorizations governing the resources used in the fusion process, and fusion policies, which define constraints on allowed fusion processes. We also discuss how such policies can be obtained for derived data. |
Bibtex: | @inproceedings{HZ16:ABAC,
author = {Hartog, J.I. den and Zannone, N.}, title = {A Policy Framework for Data Fusion and Derived Data Control}, booktitle = {Proceedings of the 2016 ACM International Workshop on Attribute Based Access Control}, year = 2016, pages = {47--57}, note = {Codaspy workshop}, doi = {10.1145/2875491.2875492}, isbn = {978-1-4503-4079-3}} |
Abstract: | In international military coalitions, situation awareness is achieved by gathering critical intel from different authorities. Authorities want to retain control over their data, as they are sensitive by nature, and, thus, usually employ their own authorization solutions to regulate access to them. In this paper, we highlight that harmonizing authorization solutions at the coalition level raises many challenges. We demonstrate how we address authorization challenges in the context of a scenario defined by military experts using a prototype implementation of SAFAX, an XACML-based architectural framework tailored to the development of authorization services for distributed systems. |
Bibtex: | @inproceedings{ELHZ16:CODASPY,
author = {Egner, A.I. and Luu, D. and Hartog, J.I. den and Zannone, N.}, title = {An authorization service for collaborative situation awareness}, booktitle = {Proceedings of the 6th ACM Conference on Data and Application Security and Privacy (CODASPY 2016)}, pages = {136--138}, isbn = {978-1-4503-3935-3}, doi = {10.1145/2857705.2857740}, year = 2016} |
Abstract: | In this paper, we propose a hybrid Data Loss Protection framework that combines signature-based and anomaly-based solutions, enabling both detection and prevention of unauthorized disclosures of data. The framework uses an anomaly-based engine that automatically learns a model of normal user behavior, allowing it to flag when insiders carry out anomalous transactions. Typically, anomaly-based solutions stop at this stage. Our framework goes further in that it exploits an operator’s feedback on alerts to automatically build and update signatures of attacks which are used to timely block undesired transactions before they can cause any damage. |
Bibtex: | @inproceedings{CFEHZ16:SP,
author = {Costante, E. and Fauri, D. and Etalle, S. and Hartog, J.I. den and Zannone, N.}, title = {Poster: Combining Data Loss Prevention and Detection}, booktitle = {IEEE Symposium on Security and Privacy}, year = 2016, note = {\url{http://www.ieee-security.org/TC/SP2016/poster-abstracts/58-poster_abstract.pdf}}} |
Abstract: | The eXtensible Access Control Markup Language (XACML) is an extensible and flexible XML language for the specification of access control policies. However, the richness and flexibility of the language (along with the verbose syntax of XML) come with a price: errors are easy to make and difficult to detect when policies grow in size. If these errors are not detected and rectified, they can result in serious data leakage and/or privacy violations leading to significant legal and financial consequences. To assist policy authors in the analysis of their policies, several policy analysis tools have been proposed based on different underlying formalisms. However, most of these tools either abstract away functions over non-Boolean domains (hence they cannot provide information about them) or produce very large encodings which hinder the performance. In this paper, we present a generic policy analysis framework that employs SMT as the underlying reasoning mechanism. The use of SMT does not only allow more fine-grained analysis of policies but also improves the performance. We demonstrate that a wide range of security properties proposed in the literature can be easily modeled within the framework. A prototype implementation and its evaluation are also provided. |
Bibtex: | @inproceedings{THRZ15:POST,
author = {Turkmen, F. and Hartog, J.I. den and Ranise, S. and Zannone, N.}, title = {Analysis of {XACML} Policies with {SMT}}, booktitle = {4th Conference on Principles of Security and Trust (POST 2015)}, year = 2015, doi = {10.1007/978-3-662-46666-7_7}, event = {European Joint Conferences on Theory and Practice of Software (ETAPS), 11--18 April 2015, London, UK}} |
Abstract: | Authentication and authorisation has been a basic and necessary service for internet transactions. With the evolution of e-commerce, traditional mechanisms for data security and access control are becoming outdated. Several new standards have emerged which allow dynamic access control based on exchanging user attributes. Unfortunately, while providing highly secure and flexible access mechanisms is a very demanding task, it cannot be considered a core competency for most e-commerce corporations. Therefore, a need to outsource or at least share such services with other entities arises. Authentication and Authorisation Infrastructures (AAIs) can provide such integrated federations of security services. They could, in particular, provide attribute-based access control (ABAC) mechanisms and mediate customers’ demand for privacy and vendors’ needs for information. We propose an AAI reference model that includes ABAC functionality based on the XACML standard and lessons learned from various existing AAIs. AAIs analysed are AKENTI, CARDEA, CAS, GridShib, Liberty ID-FF, Microsoft .NET Passport, PAPI, PERMIS, Shibboleth and VOMS. |
Bibtex: | @inproceedings{THASH15:ARES,
author = {Thaler, S. and Hartog, J. den and Ayed, D. and Sommer, D. and Hitchens, M.}, title = {Attribute-based authentication and authorization for collaborative services}, booktitle = {Proc. of the 10th International Conference on Availability, Reliability and Security (ARES), Workshop AU2EU}, location = {Universit{\'e} Paul Sabatier, Toulouse, France}, date = {August 24-28}, doi = {10.1109/ARES.2015.41}, year = {2015}} |
Abstract: | The protection of sensitive information is of utmost importance for organizations. The complexity and dynamism of modern businesses are forcing a re-think of traditional protection mechanisms. In particular, a priori policy enforcement mechanisms are often complemented with auditing mechanisms that rely on an a posteriori analysis of logs recording users’ activities to prove conformity to policies and detect policy violations when a valid explanation of conformity does not exist. However, existing auditing solutions require that the information necessary to assess policy compliance is available for the analysis. This assumption is not realistic. Indeed, a good deal of users’ activities may not be under the control of the IT system and thus they cannot be logged. In this paper we tackle the problem of accessing policy compliance in presence of incomplete logs. In particular, we present an auditing framework to assist analysts in finding a valid explanation for the events recorded in the logs and to pinpoint policy violations if such an explanation does not exist, when logs are incomplete. We also introduce two strategies for the refinement of plausible explanations of conformity to drive analysts along the auditing process. Our framework has been implemented on top of CIFF, an abductive proof procedure, and the efficiency and effectiveness of the refinement strategies evaluated. Keywords: Abduction, Policy Compliance, Abductive Reasoning |
Bibtex: | @inproceedings{SHEZ15:HOTSPOT,
author = {Sher, U.S. and Hartog, J.I. den and Etalle, S. and Zannone, N.}, year = {2015}, title = {Auditing with incomplete logs}, booktitle = {Proceedings of the 3rd Workshop on Hot Issues in Security Principles and Trust}, place = {London, UK}, date = {April 18, 2015}, event = {affiliated with ETAPS 2015}, pages = {1--23}} |
Abstract: | Building Automation Systems (BAS), alternatively known as Building Management Systems (BMS), which centralise the management of building services, are often connected to corporate networks and are routinely accessed remotely for operational management and emergency purposes. The protocols used in BAS, in particular BACnet, were not designed with security as a primary requirement, thus the majority of systems operate with sub-standard or non-existent security implementations. As intrusion is thus likely easy to achieve, intrusion detection systems should be put in place to ensure they can be detected and mitigated. Existing intrusion detection systems typically deal only with known threats (signature-based approaches) or suffer from a high false positive rate (anomaly-based approaches). In this paper we present an overview of the problem space with respect to BAS, and suggest that state aware machine learning techniques could be used to discover threats that comprise a collection of legitimate commands. We provide a first step showing that the concept can be used to detect an attack where legitimate write commands being sent in rapid succession may cause system failure. We capture the state as a ‘time since last write’ event and use a basic artificial neural network classifier to detect attacks. |
Bibtex: | @inproceedings{PJH15:AISM,
author = {Johnstone, Michael N. and Peacock, Matthew and Hartog, J.I. den}, year = {2015}, title = {Timing attack detection on {BACNET} via a machine learning approach}, booktitle = {13th Australian Information Security Management Conference}, place = {Perth, AU}, date = {Nov 30-Dec 2, 2015}, event = {part of 2015 SRI Security Congress}, pages = {}} |
Abstract: | In this paper we propose a basic framework to merge security controllers with probabilistic concepts. This framework provides a first step towards quantitative security achieved by probabilistic controllers. It extends the framework for specification, analysis, and automatic generation of security controllers provided in [21, 23] by considering probabilistic aspects of the behaviour of both the target process and the controller. Controllers may actively try to influence the choice of action of the target system or only passively react to actions the target system tried to perform. In a non-probabilistic setting both active and passive controllers can be expressed by the same model. In a probabilistic setting, however, these two types of controllers can differ. We respectively use the notions of generative and reactive processes to capture this distinction and discuss the different behaviours obtaining in the different settings. |
Bibtex: | @inproceedings{HM14:QASA,
author = {Hartog, J.I. den and Matteucci, I.}, title = {Introducing Probabilities in Controller Strategies}, booktitle = {Post-conference Proceedings 3rd international workshop on Quantitative Aspects in Security Assurance (QASA 2014), LNCS 8872}, editor = {Garcia-Alfaro, Joaquin and Herrera-Joancomart{\'\i}, Jordi and Lupu, Emil and Posegga, Joachim and Aldini, Alessandro and Martinelli, Fabio and Suri, Neeraj}, publisher = {Springer International Publishing}, year = {2015}, pages = {233--249}, isbn = {978-3-319-17015-2}, url = {http://dx.doi.org/10.1007/978-3-319-17016-9_15}, keywords = {Security controller; Probability; Reactive; Generative}, event = {ESORICS 2014 - European Symposium on Research in Computer Security}, doi = {10.1007/978-3-319-17016-9_15}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{ABCH14:TRUST,
author = {Ayed, D. and Bichsel, P. and Camenisch, J. and den Hartog, J.I.}, title = {Integration of Data-Minimising Authentication into Authorisation Systems}, booktitle = {Proceedings of 7th International Conference on Trust and Trustworthy Computing (Trust 2014)}, volume = {LNCS 8564}, editor = {Holz, Thorsten and Ioannidis, Sotiris}, publisher = {Springer International Publishing}, year = 2014, pages = {179--187}, isbn = {978-3-319-08592-0}, url = {http://dx.doi.org/10.1007/978-3-319-08593-7_12}, keywords = {authentication; authorisation; access control; privacy; XACML}, doi = {10.1007/978-3-319-08593-7_12}} |
Abstract: | Data leakage causes significant losses and privacy breaches worldwide. In this paper we present a white-box data leakage detection system to spot anomalies in database transactions. We argue that our approach represents a major leap forward w.r.t. previous work because: i) it significantly decreases the False Positive Rate (FPR) while keeping the Detection Rate (DR) high; on our experimental dataset, consisting of millions of real enterprise transactions, we measure a FPR that is orders of magnitude lower than in state-of-the-art comparable approaches; and ii) the white-box approach allows the creation of self-explanatory and easy to update profiles able to explain why a given query is anomalous, which further boosts the practical applicability of the system. |
Bibtex: | @inproceedings{CHPEP14:DBSEC,
author = {Costante, E. and den Hartog, J.I. and Petkovi{\'c}, M. and Etalle, S. and Pechenizkiy, M.}, title = {Hunting the Unknown - White-Box Database Leakage Detection}, booktitle = {Proceedings of 28th IFIP WG 11.3 Conference on Data and Applications Security and Privacy (DBSEC2014)}, series = {LNCS}, volume = {8566}, pages = {243--259}, event = {Fourth International Symposium on Security in Collaboration Technologies and Systems (SECOTS 2014)}, year = 2014} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{DHZ14:SECOTS,
author = {Damen, S. and Hartog, J.I. den and Zannone, N.}, title = {CollAC: Collaborative Access Control}, booktitle = {Proceedings of Conference on Collaboration Technologies and Systems (CTS 2014)}, event = {Fourth International Symposium on Security in Collaboration Technologies and Systems (SECOTS 2014)}, year = 2014} |
Abstract: | The flexibility and expressiveness of eXtensible Access Control Markup Language (XACML) allows the specification of a wide range of policies in different access control models. However, XACML policies are often verbose and, thus, prone to errors. Several tools have been developed to assist policy authors for the verification and analysis of policies, but most of them are limited in the types of analysis they can perform. In particular, they are not able to reason about predicates of non-boolean variables and, even if they do, they do it inefficiently. In this paper, we present the X2S framework, a formal framework for the analysis of XACML policies that employs Satisfiability Modulo Theories (SMT) as the underlying reasoning mechanism. The use of SMT not only allows more fine-grained analysis of policies, but it also improves the performance of policy analysis significantly. |
Bibtex: | @inproceedings{THZ14:CCS,
author = {F. Turkmen and Hartog, J.I. den and N. Zannone}, title = {POSTER: Analyzing Access Control Policies with {SMT}}, booktitle = {Proceedings of the 21st ACM Conference on Computer and Communications Security (CCS 2014)}, event = {}, publisher = {ACM}, year = 2014} |
Abstract: | Systems dealing with personal information are legally required to satisfy the principle of data minimisation. Privacy-enhancing protocols use cryptographic primitives to minimise the amount of personal information exposed by communication. However, the complexity of these primitives and their interplay makes it hard for non-cryptography experts to understand the privacy implications of their use. In this paper, we present TRIPLEX, a framework for the analysis of data minimisation in privacy-enhancing protocols. Keywords: Data minimisation; Coalition Graphs; Detectability; Linkability |
Bibtex: | @inproceedings{VBHZ13:CCS,
author = {Veeningen, M. and Bruso, M. and Hartog, J.I. den and Zannone, N.}, title = {TRIPLEX: Verifying Data Minimisation in Communication Systems}, booktitle = {Proceedings of the 20th ACM SIGSAC Conference on Computer and Communication Security (CCS'13, Berlin, Germany, November 4-8, 2013)}, pages = {1415--1418}, publisher = {ACM}, year = 2013} |
Abstract: | A privacy policy is a legal document, used by websites to communicate how the personal data that they collect will be managed. By accepting it, the user agrees to release his data under the conditions stated by the policy. Privacy policies should provide enough information to enable users to make informed decisions. Privacy regulations support this by specifying what kind of information has to be provided. As privacy policies can be long and difficult to understand, users tend not to read them. Because of this, users generally agree with a policy without knowing what it states and whether aspects important to him are covered at all. In this paper we present a solution to assist the user by providing a structured way to browse the policy content and by automatically assessing the completeness of a policy, i.e. the degree of coverage of privacy categories important to the user. The privacy categories are extracted from privacy regulations, while text categorization and machine learning techniques are used to verify which categories are covered by a policy. The results show the feasibility of our approach; an automatic classifier, able to associate the right category to paragraphs of a policy with an accuracy approximating that obtainable by a human judge, can be effectively created. |
Bibtex: | @inproceedings{CSPH12:WPES,
author = {Costante, E. and Sun, Y. and Petkovi{\'c}, M. and Hartog, J.I. den}, title = {A machine learning solution to assess privacy policy completeness}, booktitle = {Proceedings of the 2012 ACM Workshop on Privacy in the Electronic Society (WPES co-located with CCS 2012)}, date = {October 15, 2012}, event_location= {Raleigh NC, USA}, pages = {91--96}, publisher = {ACM}, year = 2012} |
Abstract: | Unlinkability is a privacy property of crucial importance for several systems (such as RFID or voting systems). Informally, unlinkability states that, given two events/items in a system, an attacker is not able to infer whether they are related to each other. However, in the literature we find several definitions for this notion, which are apparently unrelated and show a potentially problematic lack of agreement. This paper sheds new light on unlinkability by comparing different ways of defining it and showing that in many practical situations the various definitions coincide. It does so by (a) expressing in a unifying framework four definitions of unlinkability from the literature (b) demonstrating how these definitions are different yet related to each other and to their dual notion of “inseparability” and (c) by identifying conditions under which all these definitions become equivalent. We argue that the conditions are reasonable to expect in identification systems, and we prove that they hold for a generic class of protocols. |
Bibtex: | @inproceedings{MKEH12:TGC,
author = {Bruso, M. and Chatzikokolakis, K. and Etalle, S. and Hartog, J.I. den}, title = {Linking Unlinkability}, editor = {C. Palamidessi and M.D. Ryan}, booktitle = {Proceedings of the 7th International Symposium on Trustworthy Global Computing (TGC 2012). Revised Selected Papers.}, event_location = {Newcastle upon Tyne, UK}, date = {September 7-8, 2012}, volume = {LNCS~8191}, pages = {129--144}, publisher = {Springer-Verlag}, year = {2012}} |
Abstract: | The need for privacy protection on the Internet is well recognized. Everyday users are asked to release personal information in order to use online services and applications. Service providers do not always need all the data they gather to be able to offer a service. Thus users should be aware of what data is collected by a provider to judge whether this is too much for the services offered. Providers are obliged to describe how they treat personal data in privacy policies. By reading the policy users could discover, amongst others, what personal data they agree to give away when choosing to use a service. Unfortunately, privacy policies are long legal documents that users notoriously refuse to read. In this paper we propose a solution which automatically analyzes privacy policy text and shows what personal information is collected. Our solution is based on the use of Information Extraction techniques and represents a step towards the more ambitious aim of automated grading of privacy policies. |
Bibtex: | @inproceedings{CHP12:DPM,
author = {Costante, E. and Hartog, J.I. den and Petkovi{\'c}, M.}, title = {What Websites Know About You - Privacy Policy Analysis Using Information Extraction}, booktitle = {Proceedings of the 7th DPM International Workshop on Data Privacy Management and Autonomous Spontaneous Security. (Revised Selected Papers)}, volume = {LNCS~7731}, editor = {R. Di Pietro and J. Herranz and E. Damiani and R. State}, publisher = {Springer-Verlag}, pages = {146--159}, year = {2012}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{CHP11:STAST,
author = {E. Costante and Hartog, J. den and M. Petkovi{\'c}}, title = {On-line Trust Perception: What Really Matters}, booktitle = {Proceedings of the 1st Workshop on Socio-Technical Aspects in Security and Trust (STAST'11)}, event_location= {Milan, Italy}, pages = {52--59}, publisher = {IEEE Computer Society}, year = {2011}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{CLHPA11:CLOUD,
author = {Chadwick, D.W. and Lievens, S.F. and Hartog, J.I. den and Pashalidis, A. and Alhadeff, J.}, year = 2011, title = {My private cloud overview: a trust, privacy and security infrastructure for the cloud}, booktitle = {Proceedings of the 4th IEEE International Conference on Cloud Computing (CLOUD 2011)}, publisher = {IEEE Computer Society}, event_location = {Washington DC, USA}, event_dates= {July 4-9}, pages = {752--753}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{PHWW10:SAC,
title = {Improving DPA by peak distribution analysis}, author= {J. Pan and Woudenberg, J.G.J. van and Hartog, J.I. den and M.F. Witteman}, editor = {A. Biryukov and G. Gong and D.R. Stinson}, booktitle = {Selected Areas in Cryptography (17th international workshop, SAC~2010, Revised Selected Papers), LNCS~6544}, pages = {241--261}, publisher = {Springer-Verlag}, year = 2011, address = {Waterloo, Ontario, Canada}, event_dates = {August 12-13, 2010}} |
Abstract: | Simple non-interference is too restrictive for specifying and enforcing information flow policies in most programs. Exceptions to non-interference are provided using declassification policies. Several approaches for enforcing declassification have been proposed in the literature. In most of these approaches, the declassification policies are embedded in the program itself or heavily tied to the variables in the program being analyzed, thereby providing little separation between the code and the policy. Consequently, the previous approaches essentially require that the code be trusted, since to trust that the correct policy is being enforced, we need to trust the source code. In this paper, we propose a novel framework in which declassification policies are related to the source code being analyzed via its I/O channels. The framework supports many of the declassification policies identified in the literature. Based on flow-based static analysis, it represents a first step towards a new approach that can be applied to untrusted and legacy source code to automatically verify that the analyzed program complies with the specified declassification policies. The analysis works by constructing a conservative approximation of expressions over input channel values that could be output by the program, and by determining whether all such expressions satisfy the declassification requirements stated in the policy. We introduce a representation of such expressions that resembles tree automata. We prove that if a program is considered safe according to our analysis then it satisfies a property we call Policy Controlled Release, which formalizes information-flow correctness according to our notion of declassification policy. We demonstrate, through examples, that our approach works for several interesting and useful declassification policies, including one involving declassification of the average of several confidential values. |
Bibtex: | @inproceedings{RBHWE10:SP,
month = {May}, official_url = {http://dx.doi.org/10.1109/SP.2010.14}, author = {B. {Pontes Soares Rocha} and S. {Bandhakavi} and J. I. {den Hartog} and W. H. {Winsborough} and S. {Etalle}}, num_pages = {16}, pres_types = {Talk}, address = {USA}, publisher = {IEEE Society press}, id_number = {10.1109/SP.2010.14}, journal = {Proceedings IEEE Symposium on Security and Privacy}, isbn_13 = {978-0-7695-4035-1}, howpublished = {http://eprints.eemcs.utwente.nl/19077/}, location = {Oakland, California}, event_type = {Conference}, event_dates = {May 16-19, 2010}, booktitle = {IEEE Symposium on Security and Privacy, Oakland, California}, title = {Towards static flow-based declassification for legacy and untrusted programs}, year = {2010}, pages = {93--108}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{BCH10:CSF,
author = {Bruso, M. and Chatzikokolakis, K. and Hartog, J.I. den}, year = {2010}, title = {Formal verification of privacy for RFID systems}, booktitle = {Proceedings of the 23rd IEEE Computer Security Foundations Symposium (CSF'10, Edinburgh, UK, July 17-19, 2010)}, pages = {75--88}, publisher = {IEEE Computer Society}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{LPH10:ACNS,
author = {J. Lu and J. Pan and J. den Hartog}, title = {Principles on the Security of AES against First and Second-Order Differential Power Analysis}, editor={Jianying Zhou and Moti Yung}, booktitle={Proceedings of the 8th International Conference on Applied Cryptography and Network Security (ACNS'10)}, year={2010}, series={Lecture Notes in Computer Science}, volume={6123}, pages ={168--185}, publisher ={Springer-Verlag}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{SHE09:FAST,
author = {Spiessens, A.O.D. and Hartog, J.I. den and Etalle, S.}, year = {2009}, title = {Know what you trust: Analyzing and designing trust policies with Scoll}, editor = {P. Degano and J. Guttma and F. Martinelli}, booktitle = {Formal Aspects in Security and Trust 2008. Revised Selected Papers, LNCS~5491}, publisher = {Springer-Verlag}, location = {Malaga, Spain}, event_dates = {October 9-10, 2008}, official_url = {http://dx.doi.org/10.1007/978-3-642-01465-9_9}, pages = {129--142}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{PLH09:ISA,
author = {J. Pan and J. Lu and Hartog, J.I. den}, title = {You Cannot Hide Behind the Mask: Power Analysis on a Provably Secure S-Box Implementation}, editor = {H.Y. Youm and M. Yung}, booktitle = {Information Security Applications (10th International Workshop, WISA 2009, Busan, Korea, August 25-27, 2009, Revised Selected Papers)}, series = {Lecture Notes in Computer Science, Vol. 5932}, pages = {178--192}, publisher = {Springer}, year = 2009} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{PHV08:SEC,
official_url = {http://link.springer.com/chapter/10.1007/978-0-387-09699-5_28}, author = {J. Pan and Hartog, J.I. den and Vink, E.P. de}, booktitle = {Proc. IFIP 23rd International Information Security Conference (SEC08)}, title = {An Operation-Based Metric for DPA Resistance}, series = {IFIP Conference Proceedings, Vol.~278}, publisher = {Springer-Verlag}, year = {2008}, pages = {429--443}, location = {Milan, Italy}, event_type = {Conference}, event_dates = {8-10 September 2008}} |
Abstract: | In recent years we have witnessed a great increase in the interest in Trust Management (TM) techniques both from the industrial and the academic sectors. The booming research has also determined a duality in the very definition of TM system which can lead to confusion. In one of the two categories of TM systems a great deal of work has yet to be done in advancing the inherently adaptive nature of trust. This position paper examines reasons for the success of TM, the two broad TM categories, and, for reputation-based TM, issues of ‘Regret Management’ and accountability that are necessary enhancements on the road leading to much more sophisticated TM architectures. |
Bibtex: | @inproceedings{EHM07:Automics,
number = {302}, month = {December}, official_url = {http://dx.doi.org/10.1145/1365562.1365569}, author = {S. {Etalle} and J. I. {den Hartog} and S. {Marsh}}, num_pages = {6}, series = {ACM International Conference Proceeding Series}, pres_types = {Invited/Keynote Talk}, publisher = {ACM: Association for Computing Machinery}, id_number = {10.1145/1365562.1365569}, isbn_13 = {978-963-9799-09-7}, howpublished = {http://eprints.eemcs.utwente.nl/12914/}, location = {Rome, Italy}, event_type = {Conference}, booktitle = {Proceedings of the 1st International Conference on Autonomic Computing and Communication Systems, Autonomics, Rome, Italy}, title = {Trust and Punishment (Invited paper)}, year = {2007}} |
Abstract: | We extend a Probabilistic Hoare-style logic to formalize game-based cryptographic proofs. Our approach provides a systematic and rigorous framework, thus preventing errors from being introduced. We illustrate our technique by proving semantic security of ElGamal. |
Bibtex: | @inproceedings{CH06:ICALP,
volume = {4052}, howpublished = {http://eprints.eemcs.utwente.nl/1785/}, month = {July}, author = {R. J. Corin and J. I. den Hartog}, series = {Lecture Notes in Computer Science}, booktitle = {ICALP 2006 track C, Venice, Italy}, editor = {M. Bugliesi and B. Preneel and V. Sassone}, type = {Technical Report}, address = {Berlin}, title = {A Probabilistic Hoare-style logic for Game-based Cryptographic Proofs}, publisher = {Springer-Verlag}, pages = {252--263}, year = {2006}, institution = {University of Twente}, location = {Venice, Italy}, event_type = {Conference}, event_dates = {9-13 Jul 2006}, official_url = {http://dx.doi.org/10.1007/11787006_22}, num_pages = {12}, pres_types = {Talk}, isbn = {3-540-35907-9}, id_number = {10.1007/11787006_22}} |
Abstract: | We describe and implement a policy language. In our system, agents can distribute data along with usage policies in a decentralized architecture. Our language supports the specification of conditions and obligations, and also the possibility to refine policies. In our framework, the compliance with usage policies is not actively enforced. However, agents are accountable for their actions, and may be audited by an authority requiring justifications. |
Bibtex: | @inproceedings{CCDEH05:POLICY,
month = {June}, official_url = {http://doi.ieeecomputersociety.org/10.1109/POLICY.2005.5}, author = {J. G. {Cederquist} and R. J. {Corin} and M. A. C. {Dekker} and S. {Etalle} and J. I. {den Hartog}}, num_pages = {10}, pres_types = {Talk}, editor = {A. {Sahai} and W. H. {Winsborough}}, isbn = {0-7695-2265-3}, address = {Los Alamitos, California}, publisher = {IEEE Computer Society}, id_number = {10.1109/POLICY.2005.5}, howpublished = {http://eprints.eemcs.utwente.nl/692/}, location = {Stockholm, Sweden}, event_type = {Workshop}, booktitle = {6th Int. Workshop on Policies for Distributed Systems \& Networks (POLICY), Stockholm, Sweden}, title = {An Audit Logic for Accountability}, year = {2005}, pages = {34--43}} |
Abstract: | abstract not found |
Bibtex: | @InProceedings{CTDEHH05:STM,
author = {Czenko, M. and Tran, H. and Doumen, J.M. and Etalle, S. and Hartel, P.H. and Hartog, J.I. den}, title = {Nonmonotonic trust management for P2P applications}, editor = {S. Mauw and V. Isarny and C. Cremers}, booktitle = {Proceedings of the First International Workshop on Security and Trust Management (STM'05), Milan, Italy}, date = {September 15, 2005}, publisher = {Elsevier Science}, series = {Electronic Notes in Theoretical Computer Science}, volume = {Vol. 157(3)}, pages = {113--130}, year = {2005}, official_url = {http://dx.doi.org/10.1016/j.entcs.2005.09.037}, issn = {1571-0661}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{STHHS05:LCN,
month = {November}, official_url = {http://doi.ieeecomputersociety.org/10.1109/LCN.2005.71}, author = {V. {Sundramoorthy} and C. {Tan} and P. H. {Hartel} and J. I. {den Hartog} and J. {Scholten}}, num_pages = {9}, isbn = {0-7695-2421-4}, address = {Los Alamitos, California}, publisher = {IEEE Computer Society}, id_number = {10.1109/LCN.2005.71}, howpublished = {http://eprints.eemcs.utwente.nl/709/}, location = {Sydney, Australia}, event_type = {Conference}, booktitle = {30th Annual IEEE Conf. on Local Computer Networks (LCN), Sydney, Australia}, title = {Functional Principles of Registry-based Service Discovery}, year = {2005}} |
Abstract: | We argue that among denial-of-service (DoS) attacks, link-layer jamming is a more attractive option to attackers than radio jamming is. By exploiting the semantics of the link-layer protocol (aka MAC protocol), an attacker can achieve better efficiency than blindly jamming the radio signals alone. We investigate some jamming attacks of S-MAC, what level of effectiveness and efficiency the attack can potentially achieve, and what countermeasures can be implemented against these attacks. |
Bibtex: | @inproceedings{LHHH05:EWSN,
month = {January}, official_url = {http://doi.ieeecomputersociety.org/10.1109/EWSN.2005.1462013}, author = {Y. W. {Law} and P. H. {Hartel} and J. I. {den Hartog} and P. J. M. {Havinga}}, num_pages = {9}, isbn = {0-7803-8801-1}, address = {Los Alamitos, California}, publisher = {IEEE Computer Society}, id_number = {10.1109/EWSN.2005.1462013}, howpublished = {http://eprints.eemcs.utwente.nl/703/}, location = {Istanbul, Turkey}, event_type = {Workshop}, booktitle = {2nd European Workshop on Wireless Sensor Networks (EWSN), Istanbul, Turkey}, title = {Link-layer jamming attacks on S-MAC}, year = {2005}, pages = {217--225}} |
Abstract: | We propose a language that allows agents to distribute data with usage policies in a decentralized architecture. In our framework, the compliance with usage policies is not enforced. However, agents may be audited by an authority at an arbitrary moment in time. We design a logic that allows audited agents to prove their actions, and to prove their authorization to posses particular data. Accountability is defined in several flavors, including agent accountability and data accountability. Finally, we show the soundness of the logic. |
Bibtex: | @inproceedings{CEHLS04:FAST,
month = {August}, official_url = {http://dx.doi.org/10.1007/0-387-24098-5_14}, author = {R. J. {Corin} and S. {Etalle} and J. I. {den Hartog} and G. {Lenzini} and I. S. {Staicu}}, num_pages = {15}, pres_types = {Talk}, editor = {T. {Dimitrakos} and F. {Martinelli}}, isbn = {0-387-24050-0}, address = {Berlin}, publisher = {Springer-Verlag}, id_number = {10.1007/0-387-24098-5_14}, howpublished = {http://eprints.eemcs.utwente.nl/767/}, volume = {IFIP 173}, location = {Toulouse, France}, event_type = {Workshop}, booktitle = {2nd Int. Workshop on Formal Aspect of Security and Trust (FAST), Toulouse, France}, title = {A Logic for Auditing Accountability in Decentralized Systems}, year = {2004}, pages = {187--201}} |
Abstract: | This paper focuses on the usability of the PINPAS tool. The PINPAS tool is an instruction-level interpreter for smartcard assembler languages, augmented with facilities to study side-channel vulnerabilities. The tool can simulate side-channel leakage and has a suite of utilities to analyze this. The usage of the tool, for the analysis of a cryptographic algorithm is illustrated using the standard AES and RSA. Vulnerabilities of the implementations are identified and protective measures added. It is argued, that the tool can be instrumental for the design and realization of secure smartcard implementations in a systematic way. |
Bibtex: | @inproceedings{HV04:FAST,
month = {August}, official_url = {http://dx.doi.org/10.1007/0-387-24098-5_7}, author = {J. I. {den Hartog} and E. P. {de Vink}}, num_pages = {14}, editor = {T. {Dimitrakos} and F. {Martinelli}}, isbn = {0-387-24050-0}, address = {Boston, Massachusetts}, publisher = {Kluwer Academic Publishers}, howpublished = {http://eprints.eemcs.utwente.nl/789/}, volume = {IFIP 173}, location = {Toulouse, France}, event_type = {Workshop}, booktitle = {2nd Int. Workshop on Formal Aspect of Security and Trust (FAST), Toulouse, France}, title = {Virtual Analysis and Reduction of Side-Channel Vulnerabilities of Smartcards}, year = {2004}, pages = {85--98}} |
Abstract: | This paper describes the PINPAS tool, a tool for the simulation of power analysis and other side-channel attacks on smartcards. The PINPAS tool supports the testing of algorithms for vulnerability to SPA, DPA, etc. at the software level. Exploitation of the PINPAS tool allows for the identification of weaknesses in the implementation in an early stage of development. A toy algorithm is discussed to illustrate the usage of the tool. |
Bibtex: | @inproceedings{HVVVW03:SEC,
official_url = {http://www.wkap.nl/prod/b/1-4020-7449-2}, author = {J. I. {den Hartog} and J. {Verschuren} and E. P. {de Vink} and J. {Vos} and W. {Wiersma}}, num_pages = {5}, editor = {D. {Gritzalis} and S. {De Capitani di Vimercati} and P. {Samarati} and S. K. {Katsikas}}, isbn = {1-4020-7449-2}, address = {Boston, Massachusetts}, publisher = {Kluwer Academic Publishers}, howpublished = {http://eprints.eemcs.utwente.nl/845/}, location = {Athens, Greece}, event_type = {Conference}, booktitle = {18th IFIP TC11 Int. Conf. on Information Security and Privacy in the Age of Uncertainty (SEC), Athens, Greece}, title = {PINPAS: A Tool for Power Analysis of Smartcards}, year = {2003}, pages = {453--457}} |
Abstract: | This paper provides a case-study in the field of metric semantics for probabilistic programming. Both an operational and a denotational semantics are presented for an abstract process language L_pr, which features action refinement and probabilistic choice. The two models are constructed in the setting of complete ultrametric spaces, here based on probability measures of compact support over sequences of actions. It is shown that the standard toolkit for metric semantics works well in the probabilistic context of L_pr, e.g. in establishing the correctness of the denotational semantics with respect to the operational one. In addition, it is shown how the method of proving full abstraction --as proposed recently by the authors for a nondeterministic language with action refinement-- can be adapted to deal with the probabilistic language L_pr as well. |
Bibtex: | @inproceedings{HVB00:entcs,
month = {March}, official_url = {http://dx.doi.org/10.1016/S1571-0661(05)80038-6}, issn = {1571-0661}, author = {J. I. {den Hartog} and E. P. {de Vink} and J. W. {de Bakker}}, num_pages = {28}, series = {Electronic Notes in Theoretical Computer Science}, pres_types = {Talk}, editor = {T. {Hurley} and M. {Mac an Airchinnigh} and M. {Schellekens} and A. {Seda}}, address = {Amsterdam}, publisher = {Elsevier}, id_number = {10.1016/S1571-0661(05)80038-6}, journal = {Electronic Notes in Theoretical Computer Science (ENTCS)}, location = {Cork, Ireland}, howpublished = {http://eprints.eemcs.utwente.nl/964/}, volume = {40}, event_type = {Conference}, booktitle = {MFCSIT2000, The First Irish Conference on the Mathematical Foundations of Computer Science and Information, Cork, Ireland}, title = {Metric Semantics and Full Abstractness for Action Refinement and Probabilistic Choice}, year = {2001}, pages = {72--99}} |
Abstract: | abstract not found |
Bibtex: | @inproceedings{Har99:ASIAN,
author = {Hartog, J.I. den}, title = {Verifying Probabilistic Programs Using a {Hoare} like Logic}, editor = {P.S. Thiagarajan and R. Yap}, booktitle = {LNCS 1742 (ASIAN'99)}, publisher = {Springer}, pages = {113--125}, doi = {10.1007/3-540-46674-6_11}, year = 1999} |
Abstract: | For a process language with both nondeterministic and probabilistic choice, and a form of failure a transition system is given from which, in a modular way, various operational models corresponding to various interpretations of nondeterminism and probability can be obtained. The effect of failure of one component for the system as a whole is treated differently in each interpretation. The same approach is followed for an extension of the language with a parallel operator. The adopted concurrency model is of a distributed nature and assumes that progress is guaranteed if nonfailing components exist. To this end the notion of a take-over of a failing component is incorporated in the transition system. It is shown that the modular way in which the transition system can yield different semantical models applies to this setting as well. |
Bibtex: | @inproceedings{HV98:probmiv,
month = {June}, official_url = {http://dx.doi.org/10.1016/S1571-0661(05)82521-6}, issn = {1571-0661}, author = {J. I. {den Hartog} and E. P. {de Vink}}, num_pages = {23}, series = {Electronic Notes in Theoretical Computer Science}, pres_types = {Talk}, address = {Amsterdam}, publisher = {Elsevier}, journal = {Electronic Notes in Theoretical Computer Science (ENTCS)}, location = {Indianapolis, Indiana}, howpublished = {http://eprints.eemcs.utwente.nl/1065/}, volume = {22}, event_type = {Workshop}, event_dates = {June 19-20, 1998}, booktitle = {PROBMIV'98, First International Workshop on Probabilistic Methods in Verification, Indianapolis, Indiana}, title = {Mixing Up Nondeterminism and Probability: A Preliminary Report}, year = {1998}, pages = {88--110}} |
Abstract: | For the control flow kernel of or-parallel Prolog with commit an operational and a denotational model are constructed and related using techniques from metric semantics. By maintaining explicit scope information a compositional handling of the commit for the denotational model is established. By application of an abstraction function, which deletes this extra information the operational semantics is recovered. |
Bibtex: | @inproceedings{THV97:ILPS,
month = oct, author = {Todoran, E. and den Hartog, J. I. and de Vink, E. P. and Maluszynski, J.}, num_pages = {15}, isbn = {0-262-63180-6}, address = {Boston, Massachusetts}, publisher = {The MIT Press}, howpublished = {http://eprints.eemcs.utwente.nl/1092/}, location = {Long Island, New York}, event_type = {Conference}, booktitle = {Int. Symp. on Logic Programming (ILPS), Long Island, New York}, title = {Comparative Metric Semantics for Commit in Or-Parallel Logic Programming}, year = {1997}, pages = {101--115}} |
Abstract: | abstract not found |
Bibtex: | @phdThesis{Har02:thesis,
author = {den Hartog, J. I.}, title = {Probabilistic Extensions of Semantical Models}, school = {Vrije Universiteit Amsterdam}, year = {2002}} |
Abstract: | Trust is essential in the e-business world: to allow the cooperation needed in this setting, independent service providers have to trust each other and, also, end-users have to trust service providers. Trust Management, i.e. the process of establishing trust amongst the parties involved in a transaction, can be carried out using different approaches, methods and technologies. The end-user is an important party involved in this process. Trust Perception models attempt to understand the end-user’s point of view and the pattern he adopts to trust a service over the Internet. In this chapter the authors provide a state of the art for Trust Management in e-business. They review the most important Trust Management technologies and concepts including credentials and PKI, reputation, authorization and access control, trust policies, and trust languages. A conceptual map is presented clarifying the meaning and the links between different elements of a Trust Management system. Moreover, the authors discuss the end-user’s Trust Perception. The chapter presents a literature study on Trust Perception models and introduces the new model, able to list the trust signals the end-user considers to make trust decision. Examples of such signals can be the reputation of a website, the use of security protocols, the privacy policies adopted, and the look and feel of its user interface. Finally, the directions of future work are presented, and conclusions are drawn. |
Bibtex: | @incollection{CHP12:HRESP,
author = {Costante, E. and den Hartog, J. I. and Petkovi{\'c}, M.}, year = {2012}, title = {Trust Management and User's Trust Perception in E-business}, chapter = {14}, editor = {Kajan, E. and Dorloff, F. D. and Bedini, I.}, booktitle = {Handbook of Research on E-business Standards and Protocols: Documents, Data and Advanced Web Technologies}, pages = {321--341}, publisher = {IGI Global}, url = {http://www.igi-global.com/chapter/trust-management-user-trust-perception/63477}} |
Abstract: | Privacy is a prime concern in today's information society. To protect the privacy of individuals, enterprises must follow certain privacy practices, while collecting or processing personal data. In this chapter we look at the setting where an enterprise collects private data on its website, processes it inside the enterprise and shares it with partner enterprises. In particular, we analyse three different privacy systems that can be used in the different stages of this lifecycle. One of them is the Audit Logic, recently introduced, which can be used to keep data private when it travels across enterprise boundaries. We conclude with an analysis of the features and shortcomings of these systems. |
Bibtex: | @incollection{DEH07:SPTMDM,
doi = {10.1007/978-3-540-69861-6_25}, author = {Dekker, M. A. C. and Etalle, S. and den Hartog, J. I.}, num_pages = {15}, series = {Data-Centric Systems and Applications}, editor = {Petkovi{\'c}, M. and Jonker, W.}, address = {Berlin}, publisher = {Springer Verlag}, isbn = {978-3-540-69860-9}, howpublished = {http://eprints.eemcs.utwente.nl/5236/}, volume = {XVIII}, booktitle = {Security, Privacy and Trust in Modern Data Management}, title = {Privacy Policies}, year = {2007}, pages = {383--397}} |
Abstract: | abstract not found |
Bibtex: | @unpublished{HHT10:MISC,
title = {Combined Trust Management Architecture (Extended Abstract)}, author = {den Hartog, J. I. and H{\"u}tter, C. and Trabelsi, S.}, booktitle = {Proceedings Mobility, Individualisation, Socialisation, Connectivity Conference (MISC) 2010}, year = {2010}, date = {January 20--22 2010}, address = {London}, url = {http://www.eife-l.org/publications/eportfolio/proceedings2/lfl2010/lfl2010proceedings.pdf}, note = {\url{http://www.eife-l.org/publications/eportfolio/proceedings2/lfl2010/lfl2010proceedings.pdf}}} |
Abstract: | Traditionally, medical data is stored and processed using paper-based files. Recently, medical facilities have started to store, access and exchange medical data in digital form. The drivers for this change are mainly demands for cost reduction, and higher quality of health care. The main concerns when dealing with medical data are availability and confidentiality. Unavailability (even temporary) of medical data is expensive. Physicians may not be able to diagnose patients correctly, or they may have to repeat exams, adding to the overall costs of health care. In extreme cases availability of medical data can even be a matter of life or death. On the other hand, confidentiality of medical data is also important. Legislation requires medical facilities to observe the privacy of the patients, and states that patients have a final say on whether or not their medical data can be processed or not. Moreover, if physicians, or their EHR systems, are not trusted by the patients, for instance because of frequent privacy breaches, then patients may refuse to submit (correct) information, complicating the work of the physicians greatly. In traditional data protection systems, confidentiality and availability are conflicting requirements. The more data protection methods are applied to shield data from outsiders the more likely it becomes that authorized persons will not get access to the data in time. Consider for example, a password verification service that is temporarily not available, an access pass that someone forgot to bring, and so on. In this report we discuss a novel approach to data protection, Audit-based Compliance Control (AC2), and we argue that it is particularly suited for application in EHR systems. In AC2, a-priori access control is minimized to the mere authentication of users and objects, and their basic authorizations. 
More complex security procedures, such as checking user compliance to policies, are performed a-posteriori by using a formal and automated auditing mechanism. To support our claim we discuss legislation concerning the processing of health records, and we formalize a scenario involving medical personnel and a basic EHR system to show how AC2 can be used in practice. This report is based on previous work (Dekker & Etalle 2006) where we assessed the applicability of a-posteriori access control in a health care scenario. A more technically detailed article about AC2 recently appeared in the IJIS journal, where we focussed however on collaborative work environments (Cederquist, Corin, Dekker, Etalle, & Hartog, 2007). In this report we first provide background and related work before explaining the principal components of the AC2 framework. Moreover we model a detailed EHR case study to show its operation in practice. We conclude by discussing how this framework meets current trends in healthcare and by highlighting the main advantages and drawbacks of using an a-posteriori access control mechanism as opposed to more traditional access control mechanisms. |
Bibtex: | @techreport{DHE07:UT,
number = {TR-CTIT-07-46}, issn = {1381-3625}, author = {M. A. C. {Dekker} and J. I. {den Hartog} and S. {Etalle}}, num_pages = {16}, keywords = {Access Control, Electronic Health Records}, address = {Enschede}, publisher = {Centre for Telematics and Information Technology University of Twente}, howpublished = {http://eprints.eemcs.utwente.nl/10742/}, booktitle = {Assimilating Privacy Technologies and Heath Care Compliance}, title = {Audit-based Compliance Control (AC2) for EHR Systems}, type = {Technical Report}, institution = {Centre for Telematics and Information Technology University of Twente}, year = {2007}} |
Abstract: | We present a distributed framework where agents can share data along with usage policies. We use an expressive policy language including conditions, obligations and delegation. Our framework also supports the possibility to refine policies. Policies are not enforced a-priori. Instead policy compliance is checked using an a-posteriori auditing approach. Policy compliance is shown by a (logical) proof that the authority can systematically check for validity. Tools for automatically checking and generating proofs are also part of the framework. |
Bibtex: | @techreport{CCDEHL06:UT,
number = {TR-CTIT-06-33}, issn = {1381-3625}, author = {J. G. {Cederquist} and R. J. {Corin} and M. A. C. {Dekker} and S. {Etalle} and J. I. {den Hartog} and G. {Lenzini}}, num_pages = {14}, address = {Enschede}, publisher = {Centre for Telematics and Information Technology University of Twente}, journal = {CTIT Technical Report}, howpublished = {http://eprints.eemcs.utwente.nl/2773/}, title = {The Audit Logic: Policy Compliance in Distributed Systems}, type = {Technical Report}, institution = {Centre for Telematics and Information Technology University of Twente}, year = {2006}} |
Abstract: | Privacy is a prime concern in today's information society. To protect the privacy of individuals, enterprises must follow certain privacy practices, while collecting or processing personal data. In this chapter we look at the setting where an enterprise collects private data on its website, processes it inside the enterprise and shares it with partner enterprises. In particular, we analyse three different privacy systems that can be used in the different stages of this lifecycle. One of them is the Audit Logic, recently introduced, which can be used to keep data private when it travels across enterprise boundaries. We conclude with an analysis of the features and shortcomings of these systems. |
Bibtex: | @techreport{DEH06:UT,
number = {TR-CTIT-06-16}, month = apr, url = {http://www.ub.utwente.nl/webdocs/ctit/1/0000016a.pdf}, issn = {1381-3625}, author = {Dekker, M. A. C. and Etalle, S. and den Hartog, J. I.}, address = {Enschede}, publisher = {Centre for Telematics and Information Technology University of Twente}, howpublished = {http://eprints.eemcs.utwente.nl/5700/}, title = {Privacy in an Ambient World}, type = {Technical Report}, institution = {Centre for Telematics and Information Technology University of Twente}, year = {2006}} |
Abstract: | abstract not found |
Bibtex: | @techreport{MPAVPH07:TUE,
author = {Mostowski, W. and Pan, J. and Akkiraju, S. and de Vink, E. P. and Poll, E. and den Hartog, J. I.}, title = {A Comparison of {Java Cards}: State-of-Affairs 2006}, year = {2007}, number = {CSR 07-06}, address = {Eindhoven}, publisher = {Eindhoven University of Technology}, institution = {Eindhoven University of Technology}, num_pages = {64}} |
Abstract: | This paper presents the results from a power analysis of the AES and RSA algorithms by simulation using the PINPAS tool. The PINPAS tool is capable of simulating the power consumption of assembler programs implemented in, amongst others, Hitachi H8/300 assembler. The Hitachi H8/300 is a popular CPU for smartcards. Using the PINPAS tool, the vulnerability for power analysis attacks of straightforward AES and RSA implementations is examined. In case a vulnerability is found countermeasures are added to the implementation that attempt to counter power analysis attacks. After these modifications the analysis is performed again and the new results are compared to the original results. |
Bibtex: | @techreport{HBH04:TUE,
number = {CSR 04-22}, url = {http://library.tue.nl/catalog/LinkToVubis.csp?language=dut&DataBib=6:580553}, author = {Hollestelle, G. and Burgers, W. and den Hartog, J. I.}, address = {Eindhoven}, publisher = {Eindhoven University of Technology}, howpublished = {http://eprints.eemcs.utwente.nl/798/}, title = {Power Analysis on Smartcard Algorithms Using Simulation}, type = {Technical Report}, institution = {Eindhoven University of Technology}, year = {2004}} |
Abstract: | abstract not found |
Bibtex: | @incollection{HV02:LA,
author = {den Hartog, J. I. and de Vink, E. P.}, title = {Building Metric Structures with the {Maes}-factor}, editor = {de Boer, F. S. and van der Heijden, M. and Klint, P.}, booktitle = {Liber amicorum Jaco de Bakker}, pages = {93--107}, address = {Amsterdam}, publisher = {CWI}, year = {2002}} |
Abstract: | A comparative semantics is given for a language Lprob containing probabilistic and non-deterministic choice. The effects of interpreting these operators as local or global are investigated. For three of the possible combinations an operational model and a denotational model are given and compared. First models for local probabilistic choice and local non-deterministic choice are given using a generative approach. By adjusting these models slightly models for global probability and local non-determinism are obtained. Finally models for local probability and global non-determinism are presented using a stratified approach. For use with the denotational models a construction of a complete ultra-metric space of finite multisets is given. |
Bibtex: | @techreport{Har98:vu,
number = {IR-445}, howpublished = {http://eprints.eemcs.utwente.nl/1068/}, author = {den Hartog, J. I.}, type = {Technical Report}, address = {Amsterdam}, title = {Comparative Semantics for a Process Language with Probabilistic Choice and Non-determinism}, publisher = {Vrije Universiteit}, year = {1998}, institution = {Vrije Universiteit}} |