journals.bib

@comment{{This file has been generated by bib2bib 1.96}}
@comment{{Command line: bib2bib -oc journals.cit -ob journals.bib -c $key:"IJ$" 2017.bib 2016.bib 2015.bib 2014.bib 2013.bib 2012.bib 2011.bib 2010.bib 2009.bib 2008.bib 2007.bib 2006.bib 2005.bib 2004.bib 2003.bib 2002.bib 2001.bib 2000.bib 1999.bib 1998.bib 1997.bib 1996.bib}}
@article{RigCotBel17-IJAR-IJ,
  author = {Fabrizio Riguzzi and Giuseppe Cota and
        Elena Bellodi and Riccardo Zese  },
  title = {Causal Inference in {cplint}},
  journal = {International Journal of Approximate Reasoning},
  year = {2017},
  publisher = {Elsevier},
  address = {Amsterdam},
  copyright = {Elsevier},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/RigCotBel-IJAR17.pdf},
  abstract = {
cplint is a suite of programs for reasoning and learning with Probabilistic Logic
Programming languages that follow the distribution semantics.
In this paper we describe how we have extended cplint to perform causal reasoning.
In particular, we consider Pearl's do calculus for models where all
the variables are measured.
The two cplint  modules for inference, PITA and MCINTYRE, have been extended for
computing the effect of actions/interventions on these models.
We also executed experiments comparing exact and approximate inference with
conditional and causal queries, showing that causal inference is often cheaper than conditional inference.
},
  keywords = {
Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, ProbLog, Causal Inference, Statistical Relational Artificial Intelligence
}
}
@article{AlbBelCot17-IA-IJ,
  author = {Marco Alberti and Elena Bellodi and Giuseppe Cota and
  Fabrizio Riguzzi and Riccardo Zese},
  title = {\texttt{cplint} on {SWISH}: Probabilistic Logical Inference with a Web Browser},
  journal = {Intelligenza Artificiale},
  publisher = {IOS Press},
  copyright = {IOS Press},
  year = {2017},
  issn-print = {1724-8035},
  issn-online = {2211-0097},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/AlbBelCot-IA17.pdf},
  abstract = {
\texttt{cplint} on SWISH is a web application that allows users to
perform reasoning tasks on probabilistic logic programs.
Both inference and learning systems can be performed: conditional probabilities with exact,
rejection sampling and Metropolis-Hastings methods. Moreover, the system now allows hybrid programs,
i.e., programs where some of the random variables are continuous. To perform inference on such programs likelihood weighting and particle filtering are used.
\texttt{cplint} on SWISH is also able to sample goals' arguments and
to graph the results. This paper reports on advances and new features
of \texttt{cplint} on SWISH, including the capability of drawing the
binary decision diagrams created during the inference processes.
},
  keywords = { Logic Programming, Probabilistic Logic Programming,
Distribution Semantics, Logic Programs with Annotated Disjunctions, Web
Applications
},
  volume = {11},
  number = {1},
  doi = {10.3233/IA-170106},
  pages = {47--64},
  wos = {WOS:000399736500004}
}
@article{BelLamRig17-SPE-IJ,
  author = {Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and
  Riccardo Zese and Giuseppe Cota},
  title = {A web system for reasoning with probabilistic {OWL}},
  journal = {Software: Practice and Experience},
  publisher = {Wiley},
  copyright = {Wiley},
  year = {2017},
  doi = {10.1002/spe.2410},
  issn = {1097-024X},
  month = jan,
  pages = {125--142},
  volume = {47},
  number = {1},
  scopus = {2-s2.0-84992412060},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/BelLamRig-SPE16.pdf},
  abstract = {
We present the web application TRILL on SWISH, which allows the user to write probabilistic Description Logic (DL) theories and compute the probability of queries with just a web browser.
Various probabilistic extensions of DLs have been proposed  in the recent past, since uncertainty is a fundamental component of the Semantic Web.
We consider probabilistic DL theories following our DISPONTE semantics.  Axioms of a DISPONTE Knowledge Base (KB) can be annotated with a probability and the probability of queries can be computed with inference algorithms.
TRILL is a probabilistic reasoner for DISPONTE KBs that is implemented in Prolog  and exploits its backtracking facilities for handling the non-determinism of the tableau algorithm.
TRILL on SWISH is based on SWISH, a recently proposed web framework for logic programming, based on various features and packages of SWI-Prolog (e.g., a web server and a library for creating remote Prolog engines and  posing queries to them).  TRILL on SWISH also allows users to cooperate in writing a probabilistic DL theory.
It is free, open, and accessible on the Web at the url: \trillurl; it includes a number of examples that cover a wide range of domains and provide interesting Probabilistic Semantic Web applications.
By building a web-based system, we allow users to experiment with Probabilistic DLs without the need to install a complex software stack. In this way we aim to reach out to a wider audience and popularize the Probabilistic Semantic Web.
},
  keywords = { Semantic Web, Web Applications, Description Logics, Probabilistic Description Logics, SWI-Prolog, Logic Programming
}
}
@article{RigBelZes17-IJAR-IJ,
  author = {Fabrizio Riguzzi and
        Elena Bellodi and Riccardo Zese and
        Giuseppe Cota and
        Evelina Lamma },
  title = {A Survey of Lifted Inference Approaches for Probabilistic
Logic Programming under the Distribution Semantics},
  journal = {International Journal of Approximate Reasoning},
  year = {2017},
  publisher = {Elsevier},
  address = {Amsterdam},
  copyright = {Elsevier},
  doi = {10.1016/j.ijar.2016.10.002},
  pdf = {http://authors.elsevier.com/a/1Tw7F,KD6ZCKEe},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/RigBelZes-IJAR17.pdf},
  volume = {80},
  issn = {0888-613X},
  pages = {313--333},
  month = jan,
  abstract = {
Lifted inference aims at answering queries from statistical relational models by reasoning on populations of individuals as a
whole instead of considering each individual singularly.
Since the initial proposal by David Poole in 2003, many lifted inference techniques have appeared, by lifting different algorithms or using approximation involving different kinds of models, including parfactor graphs and Markov Logic Networks.
Very recently lifted inference was applied to Probabilistic Logic Programming (PLP) under the distribution semantics, with proposals such as LP2 and Weighted First-Order Model Counting
(WFOMC). Moreover, techniques for dealing with aggregation parfactors can be directly applied to PLP.
In this paper we survey these approaches and present an
experimental comparison on five models.
The results show that  WFOMC outperforms the other approaches, being able to exploit more symmetries.
},
  keywords = {Probabilistic Logic Programming, Lifted Inference, Variable Elimination, Distribution Semantics, ProbLog, Statistical Relational Artificial Intelligence
},
  scopus = {2-s2.0-84992199737},
  wos = {WOS:000391080100020}
}
@article{ZesBelRig16-AMAI-IJ,
  author = {Riccardo Zese and
        Elena Bellodi  and
        Fabrizio Riguzzi and
        Giuseppe Cota and
        Evelina Lamma },
  title = {Tableau Reasoning for Description Logics and its Extension to Probabilities},
  journal = {Annals of Mathematics and Artificial Intelligence},
  publisher = {Springer},
  copyright = {Springer},
  year = {2016},
  issn-print = {1012-2443},
  issn-online = {1573-7470},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/ZesBelRig-AMAI16.pdf},
  pdf = {http://rdcu.be/kONG},
  doi = {10.1007/s10472-016-9529-3},
  abstract = {
The increasing popularity of the Semantic Web drove to a widespread
adoption of Description Logics (DLs) for modeling real world domains.
To help the diffusion of DLs, a large number of reasoning algorithms have been
developed. Usually these algorithms are implemented in procedural languages
such as Java or C++. Most of the reasoners exploit the tableau algorithm
which features non-determinism, that is not easily handled by those languages.
Prolog directly manages non-determinism, thus is a good candidate for dealing
with the tableau's non-deterministic expansion rules.
We present TRILL, for "Tableau Reasoner for descrIption Logics in proLog",
that implements a tableau algorithm and is able to return explanations
for queries and their corresponding probability, and TRILLP, for "TRILL
powered by Pinpointing formulas", which is able to compute a Boolean
formula representing the set of explanations for a query. Reasoning on real world
domains also requires the capability of managing probabilistic and uncertain
information. We show how TRILL and TRILLP can be used to compute the
probability of queries to knowledge bases following DISPONTE semantics.
Experiments comparing these with other systems show the feasibility of the
approach.},
  keywords = { Description Logics, Tableau, Prolog, Semantic Web},
  scopus = {2-s2.0-84990986085}
}
@article{RigBelLam16-SPE-IJ,
  author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and
  Riccardo Zese and Giuseppe Cota},
  title = {Probabilistic Logic Programming on the Web},
  journal = {Software: Practice and Experience},
  publisher = {Wiley},
  copyright = {Wiley},
  year = {2016},
  issn = {1097-024X},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/RigBelLam-SPE16.pdf},
  abstract = {
We present the web application "cplint on SWISH", that allows the user
to write probabilistic logic programs and compute the probability of queries
with just a web browser. The application is based on SWISH, a recently
proposed web framework for logic programming. SWISH is based on various
features and packages of SWI-Prolog, in particular its web server and
its Pengine library, that allow to create remote Prolog engines and to pose
queries to them. In order to develop the web application, we started from
the PITA system which is included in cplint, a suite of programs for reasoning
on Logic Programs with Annotated Disjunctions, by porting PITA
to SWI-Prolog. Moreover, we modified the PITA library so that it can be
executed in a multi-threading environment. Developing "cplint on SWISH"
also required modification of the JavaScript SWISH code that creates and
queries Pengines. "cplint on SWISH" includes a number of examples that
cover a wide range of domains and provide interesting applications of Probabilistic
Logic Programming (PLP). By providing a web interface to cplint
we allow users to experiment with PLP without the need to install a system,
a procedure which is often complex, error prone and limited mainly to the
Linux platform. In this way, we aim to reach out to a wider audience and
popularize PLP.},
  keywords = { Logic Programming, Probabilistic Logic Programming,
Distribution Semantics, Logic Programs with Annotated Disjunctions, Web
Applications
},
  doi = {10.1002/spe.2386},
  volume = {46},
  number = {10},
  pages = {1381--1396},
  month = oct,
  wos = {WOS:000383624900005},
  scopus = {2-s2.0-84951829971}
}
@article{Rig16-IJAR-IJ,
  author = {Fabrizio Riguzzi},
  title = {The Distribution Semantics for Normal Programs with Function Symbols},
  journal = {International Journal of Approximate Reasoning},
  year = {2016},
  publisher = {Elsevier},
  address = {Amsterdam},
  doi = {10.1016/j.ijar.2016.05.005},
  volume = {77},
  pages = {1--19},
  issn = {0888-613X},
  month = oct,
  pdf = {http://authors.elsevier.com/a/1TBE1,KD6ZCJ~x},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-IJAR16.pdf},
  copyright = {Elsevier},
  abstract = {The distribution semantics integrates logic programming and probability theory using a possible worlds approach.
Its intuitiveness and simplicity has made it the most
widely used semantics for probabilistic logic programming,
with successful applications in many domains.
When the program has function symbols, the semantics was defined for special cases: either the program has to be definite or the queries must have a finite number of finite explanations.
In this paper we show that it is possible to define the semantics for all programs. We also show that this definition coincides with that of Sato and Kameya on positive programs.
Moreover, we highlight possible approaches for inference, both exact and
approximate.
},
  keywords = {Distribution Semantics, Function Symbols,
ProbLog,
Probabilistic Logic Programming
},
  wos = {WOS:000381164500001}
}
@article{BelRigLam16-IDA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi and Evelina Lamma},
  title = {Statistical Relational Learning for Workflow Mining},
  journal = {Intelligent Data Analysis},
  publisher = {IOS Press},
  copyright = {IOS Press},
  year = {2016},
  doi = {10.3233/IDA-160818},
  month = apr,
  volume = {20},
  number = {3},
  pages = {515--541},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/BelRigLam-IDA15.pdf},
  keywords = {Workflow Mining, Process Mining, Knowledge-based Process Models, Inductive Logic Programming, Statistical Relational Learning,
Business Process Management
},
  abstract = {
The management of business processes can support  efficiency improvements in organizations. One of the most interesting problems is the mining and representation of process models in a declarative language.
Various recently proposed knowledge-based languages showed advantages over graph-based procedural notations.
Moreover, rapid changes of the environment require organizations to check how compliant are new process instances with the deployed models.
We present a Statistical Relational Learning approach to Workflow Mining that takes into account both flexibility and uncertainty in real environments.
It performs  automatic discovery of  process models expressed in a probabilistic logic.
It  uses the existing DPML  algorithm  for extracting  first-order logic constraints from process logs. The constraints are then translated into Markov Logic to learn their weights.
Inference on the resulting Markov Logic  model allows a probabilistic classification of test traces, by assigning them the probability of being compliant to the model.
We applied this approach to three datasets and compared it with DPML alone, five Petri net- and EPC-based process mining algorithms and Tilde.
The technique is able to better classify new execution traces, showing higher  accuracy and areas under the PR/ROC curves in most cases.
},
  scopus = {2-s2.0-84969808336},
  wos = {WOS:000375005000004}
}
@article{DiMBelRig15-ML-IJ,
  author = {Di Mauro, Nicola and Elena Bellodi and Fabrizio Riguzzi},
  title = {Bandit-Based {Monte-Carlo} Structure Learning of
Probabilistic Logic Programs},
  journal = {Machine Learning},
  publisher = {Springer International Publishing},
  copyright = {Springer International Publishing},
  year = {2015},
  volume = {100},
  number = {1},
  pages = {127--156},
  month = jul,
  doi = {10.1007/s10994-015-5510-3},
  url = {http://ds.ing.unife.it/~friguzzi/Papers/DiMBelRig-ML15.pdf},
  keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction},
  abstract = {Probabilistic Logic Programming can be used to model domains with complex and uncertain relationships among entities. While the problem of learning the
parameters of such programs has been considered by various authors, the problem
of learning the structure is yet to be explored in depth. In this work we present an
approximate search method based on a one-player game approach, called LEMUR. It
sees the problem of learning the structure of a probabilistic logic program as a multiarmed bandit problem, relying on the Monte-Carlo tree search UCT algorithm that
combines the precision of tree search with the generality of random sampling. LEMUR
works by modifying the UCT algorithm in a fashion similar to FUSE, that considers a
finite unknown horizon and deals with the problem of having a huge branching factor.
The proposed system has been tested on various real-world datasets and has shown
good performance with respect to other state of the art statistical relational learning
approaches in terms of classification abilities.},
  note = {The original publication is available at
\url{http://link.springer.com}}
}
@article{RigBelLamZes15-SW-IJ,
  author = {Fabrizio Riguzzi and Elena Bellodi and Evelina Lamma and Riccardo Zese},
  title = {Probabilistic Description Logics under the Distribution Semantics},
  journal = {Semantic Web - Interoperability, Usability, Applicability},
  volume = {6},
  number = {5},
  pages = {447--501},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/RigBelLamZes-SW14.pdf},
  year = {2015},
  doi = {10.3233/SW-140154},
  abstract = {
Representing uncertain information is crucial for modeling real world domains. In this paper we present a technique for the integration of probabilistic information in Description Logics (DLs) that is based on the distribution semantics for probabilistic logic programs. In the resulting approach, that we called DISPONTE, the axioms of a probabilistic knowledge base
(KB) can be annotated with a real number between 0 and 1. A probabilistic knowledge base then defines a probability
distribution over regular KBs called worlds and the probability of a given query can be obtained from the joint distribution of the worlds and the query by marginalization.
We present the algorithm BUNDLE for computing the probability of queries from DISPONTE KBs. The algorithm exploits an underlying  DL reasoner, such as Pellet, that is able to return explanations for  queries. The explanations are encoded in a Binary Decision Diagram from which the probability of the query is computed.
The experimentation of BUNDLE shows that it can handle probabilistic KBs of realistic size.
},
  keywords = { Probabilistic Ontologies, Probabilistic Description Logics, OWL, Probabilistic Logic Programming, Distribution Semantics}
}
@article{BelRig15-TPLP-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = {Structure Learning of Probabilistic Logic Programs by Searching the Clause Space},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  year = {2015},
  volume = {15},
  number = {2},
  pages = {169--212},
  pdf = {http://arxiv.org/abs/1309.2080},
  url = {http://journals.cambridge.org/abstract_S1471068413000689},
  doi = {10.1017/S1471068413000689},
  keywords = {probabilistic inductive logic programming, statistical relational learning, structure learning, distribution semantics, logic programs with annotated disjunction, CP-logic},
  abstract = {Learning probabilistic logic programming languages is receiving an increasing attention,
and systems are available for learning the parameters (PRISM, LeProbLog, LFI-ProbLog
and EMBLEM) or both structure and parameters (SEM-CP-logic and SLIPCASE) of these
languages. In this paper we present the algorithm SLIPCOVER for "Structure LearnIng
of Probabilistic logic programs by searChing OVER the clause space." It performs a beam
search in the space of probabilistic clauses and a greedy search in the space of theories
using the log likelihood of the data as the guiding heuristics. To estimate the log likelihood,
SLIPCOVER performs Expectation Maximization with EMBLEM. The algorithm has been
tested on five real world datasets and compared with SLIPCASE, SEM-CP-logic, Aleph and
two algorithms for learning Markov Logic Networks (Learning using Structural Motifs (LSM)
and ALEPH++ExactL1). SLIPCOVER achieves higher areas under the precision-recall and
receiver operating characteristic curves in most cases.}
}
@article{RigBelZes14-FAI-IJ,
  author = {Riguzzi, Fabrizio  and  Bellodi, Elena  and  Zese, Riccardo},
  title = {A History of Probabilistic Inductive Logic Programming},
  journal = {Frontiers in Robotics and AI},
  volume = {1},
  year = {2014},
  number = {6},
  url = {http://www.frontiersin.org/computational_intelligence/10.3389/frobt.2014.00006/abstract},
  doi = {10.3389/frobt.2014.00006},
  issn = {2296-9144},
  abstract = {The field of Probabilistic Logic Programming (PLP) has seen significant advances in the last 20 years, with many proposals for languages that combine probability with logic programming. Since the start, the problem of learning probabilistic logic programs has been the focus of much attention. Learning these programs represents a whole subfield of Inductive Logic Programming (ILP). In Probabilistic ILP (PILP), two problems are considered: learning the parameters of a program given the structure (the rules) and learning both the structure and the parameters. Usually, structure learning systems use parameter learning as a subroutine. In this article, we present an overview of PILP and discuss the main results.},
  pages = {1--5},
  keywords = {logic programming, probabilistic programming, inductive logic programming, probabilistic logic
programming, statistical relational learning},
  copyright = {by the authors}
}
@article{RigSwi14-TOCL-IJ,
  author = { Fabrizio Riguzzi and Terrance Swift},
  title = {Terminating Evaluation of Logic Programs with Finite Three-Valued
  Models},
  journal = {ACM Transactions on Computational Logic},
  publisher = {ACM},
  copyright = {ACM},
  volume = {15},
  number = {4},
  year = {2014},
  doi = {10.1145/2629337},
  abstract = {
As evaluation methods for logic programs have become more sophisticated, the classes of programs for which
termination can be guaranteed have expanded. From the perspective of answer set programs that include
function symbols, recent work has identified classes for which grounding routines can terminate either on
the entire program [Calimeri et al. 2008] or on suitable queries [Baselice et al. 2009]. From the perspective
of tabling, it has long been known that a tabling technique called subgoal abstraction provides good termination properties for definite programs [Tamaki and Sato 1986], and this result was recently extended
to stratified programs via the class of bounded term-size programs [Riguzzi and Swift 2013]. In this paper
we provide a formal definition of tabling with subgoal abstraction resulting in the SLG
SA algorithm. Moreover, we discuss a declarative characterization of the queries and programs for which SLG
SA terminates. We
call this class strongly bounded term-size programs and show its equivalence to programs with finite wellfounded models. For normal programs strongly bounded term-size programs strictly includes the finitely
ground programs of [Calimeri et al. 2008]. SLG
SA has an asymptotic complexity on strongly bounded termsize programs equal to the best known and produces a residual program that can be sent to an answer set
programming system. Finally, we describe the implementation of subgoal abstraction within the SLG-WAM
of XSB and provide performance results.},
  keywords = {Logic Programming, Tabled Logic Programming, Termination},
  http = {http://dl.acm.org/authorize?N05388},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/RigSwi14-TOCL.pdf},
  pages = {32:1--32:38},
  month = sep,
  issn = {1529-3785},
  address = {New York, NY, USA}
}
@article{BelLamRig14-ICLP-IJ,
  author = { Elena Bellodi and Evelina Lamma and Fabrizio Riguzzi and Santos Costa, Vitor and Riccardo Zese},
  title = {Lifted Variable Elimination for Probabilistic Logic Programming},
  journal = {Theory and Practice of Logic Programming},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  number = {Special issue 4-5 - ICLP 2014},
  volume = {14},
  year = {2014},
  pages = {681--695},
  doi = {10.1017/S1471068414000283},
  pdf = {http://arxiv.org/abs/1405.3218},
  keywords = {Probabilistic Logic Programming, Lifted Inference,
  Variable Elimination, Distribution Semantics, ProbLog,
  Statistical Relational Artificial Intelligence},
  abstract = {Lifted inference has been proposed for various probabilistic logical
  frameworks in order to compute the probability of queries in a time that
  depends on the size of the domains of the random variables rather than the
  number of instances. Even if various authors have underlined its importance
  for probabilistic logic programming (PLP), lifted inference has been applied
  up to now only to relational languages outside of logic programming. In this
  paper we adapt Generalized Counting First Order Variable Elimination (GC-FOVE)
  to the problem of computing the probability of queries to probabilistic logic
  programs under the distribution semantics. In particular, we extend the Prolog
  Factor Language (PFL) to include two new types of factors that are needed for
  representing ProbLog programs. These factors take into account the existing
  causal independence relationships among random variables and are managed by
  the extension to variable elimination proposed by Zhang and Poole for dealing
  with convergent variables and heterogeneous factors. Two new operators are
  added to GC-FOVE for treating heterogeneous factors. The resulting algorithm,
  called LP2 for Lifted Probabilistic Logic Programming, has been implemented
  by modifying the PFL implementation of GC-FOVE and tested on three benchmarks
  for lifted inference. A comparison with PITA and ProbLog2 shows the potential
  of the approach.},
  isi = {000343203200019},
  scopus = {84904624147}
}
@article{Rig14-CJ-IJ,
  author = {Fabrizio Riguzzi},
  title = {Speeding Up Inference for Probabilistic Logic Programs},
  journal = { The Computer Journal},
  publisher = {Oxford University Press},
  copyright = {Oxford University Press},
  year = {2014},
  volume = {57},
  number = {3},
  pages = {347--363},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-CJ13.pdf},
  url = {http://comjnl.oxfordjournals.org/cgi/reprint/bxt096?ijkey=TQc2Z3XTWX2P8ez&keytype=ref},
  doi = {10.1093/comjnl/bxt096},
  abstract = {Probabilistic Logic Programming (PLP) allows to represent domains containing many entities connected by uncertain relations and has many applications in particular in Machine Learning.
PITA is a PLP algorithm for computing the probability of queries that exploits tabling, answer subsumption and Binary Decision Diagrams (BDDs). PITA does not impose any restriction on the programs. Other algorithms, such as PRISM, reduce computation time by imposing restrictions on the program, namely that subgoals are independent and that clause bodies are mutually exclusive. Another assumption that simplifies inference is that clause bodies are independent. In this paper we  present the algorithms PITA(IND,IND) and PITA(OPT). PITA(IND,IND) assumes that subgoals and clause bodies are independent. PITA(OPT) instead first checks whether these assumptions hold for subprograms and subgoals: if they do, PITA(OPT) uses a simplified calculation, otherwise it resorts to BDDs. Experiments on a number of benchmark datasets show that PITA(IND,IND) is the fastest on datasets respecting the assumptions while PITA(OPT) is a good option when nothing is known about a dataset.},
  keywords = {Logic Programming, Probabilistic Logic Programming, Distribution Semantics, Logic Programs with Annotated Disjunctions, PRISM, ProbLog}
}
@article{DiMFraEta13-IA-IJ,
  author = {Nicola Di Mauro and
               Paolo Frasconi and
               Fabrizio Angiulli and
               Davide Bacciu and
               Marco de Gemmis and
               Floriana Esposito and
               Nicola Fanizzi and
               Stefano Ferilli and
               Marco Gori and
               Francesca A. Lisi and
               Pasquale Lops and
               Donato Malerba and
               Alessio Micheli and
               Marcello Pelillo and
               Francesco Ricci and
               Fabrizio Riguzzi and
               Lorenza Saitta and
               Giovanni Semeraro},
  title = {Italian Machine Learning and Data Mining research: The last
               years},
  journal = {Intelligenza Artificiale},
  volume = {7},
  number = {2},
  year = {2013},
  pages = {77--89},
  doi = {10.3233/IA-130050},
  copyright = {{IOS} Press},
  publisher = {{IOS} Press},
  abstract = {With the increasing amount of information in electronic form the fields of Machine Learning and Data Mining continue to grow by providing new advances in theory, applications and systems. The aim of this paper is to consider some recent theoretical aspects and approaches to ML and DM with an emphasis on the Italian research.}
}
@article{Rig13-FI-IJ,
  author = {Fabrizio Riguzzi},
  title = {{MCINTYRE}: A {Monte Carlo} System for Probabilistic Logic Programming},
  journal = {Fundamenta Informaticae},
  abstract = {Probabilistic Logic Programming is receiving an increasing attention for its ability to model domains with complex and uncertain relations among entities. 
In this paper we concentrate on the problem of approximate inference in probabilistic logic programming languages based on the distribution semantics.
A successful approximate approach is based on Monte Carlo sampling, that consists in verifying the truth of the query in a normal program sampled from the probabilistic program.
The ProbLog system includes such an algorithm and so does the cplint suite.
In this paper we propose an approach for Monte Carlo inference that is based on a program transformation that translates a probabilistic program into a normal program to which the query can be posed.  The current sample is stored in the internal database of the Yap Prolog engine.
The resulting system, called MCINTYRE for Monte Carlo INference wiTh Yap REcord, is evaluated on various problems: biological networks, artificial datasets and a hidden Markov model. MCINTYRE is compared with the Monte Carlo algorithms of ProbLog and with the exact inference of the PITA system. The results show that MCINTYRE is faster than the other Monte Carlo systems.},
  keywords = {Probabilistic Logic Programming,
Monte Carlo Methods,
Logic Programs with Annotated Disjunctions,
ProbLog},
  year = {2013},
  publisher = {{IOS} Press},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/Rig13-FI-IJ.pdf},
  doi = {10.3233/FI-2013-847},
  volume = {124},
  number = {4},
  pages = {521--541},
  copyright = {IOS Press}
}
@article{BelRig13-IDA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = { Expectation {Maximization} over Binary Decision Diagrams for Probabilistic Logic Programs},
  year = {2013},
  volume = {17},
  number = {2},
  journal = {Intelligent Data Analysis},
  publisher = {IOS Press},
  copyright = {IOS Press},
  pages = {343--363},
  doi = {10.3233/IDA-130582},
  url = {http://iospress.metapress.com/content/k1wu917722636526/?issue=2&genre=article&spage=343&issn=1088-467X&volume=17},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/BelRig13-IDA-IJ.pdf},
  abstract = {Recently much work in Machine Learning has concentrated on using expressive representation languages that combine aspects of logic and probability. A whole field has emerged, called Statistical Relational Learning, rich of successful applications in a variety of domains.
In this paper we present a Machine Learning technique targeted to Probabilistic Logic Programs, a family of formalisms where uncertainty is represented using Logic Programming tools.
Among various proposals for Probabilistic Logic Programming, the one based on the distribution semantics is gaining popularity and is the basis for languages such as ICL, PRISM, ProbLog and Logic Programs with Annotated Disjunctions.
This paper proposes a technique for learning parameters of these languages. Since their equivalent Bayesian networks contain hidden variables, an Expectation Maximization (EM) algorithm is adopted.
In order to speed the computation up, expectations are computed directly on the Binary Decision Diagrams that are built for inference.
The resulting system, called EMBLEM for ``EM over Bdds for probabilistic Logic programs Efficient Mining'', has been applied to a number of datasets and showed good performances both in terms of speed and memory usage. In particular its speed allows the execution of a high number of restarts, resulting in good  quality of the solutions.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programs, Logic Programs with Annotated Disjunctions, Expectation Maximization, Binary Decision Diagrams
}
}
@article{RigSwi13-TPLP-IJ,
  author = {Fabrizio Riguzzi and Terrance Swift},
  title = {Well\--Definedness and Efficient Inference for Probabilistic Logic Programming under the Distribution Semantics},
  year = {2013},
  month = mar,
  journal = {Theory and Practice of Logic Programming},
  editor = {Wolfgang Faber and Nicola Leone},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  abstract = {The distribution semantics is one of the most prominent approaches for the combination of logic programming and probability theory. Many languages follow this semantics, such as Independent Choice Logic, PRISM, pD, Logic Programs with Annotated Disjunctions (LPADs)  and ProbLog.

When a program contains function symbols, the distribution semantics
is well\--defined only if the set of explanations for a query is
finite and so is each explanation. Well\--definedness is usually
either explicitly imposed or is achieved by severely limiting the
class of allowed programs.
In this paper we identify a larger class of programs for which the
semantics is well\--defined together with an efficient procedure for
computing the probability of queries.
Since LPADs offer the most general syntax, we present our results for
them, but our results are applicable to all languages under the
distribution semantics.

We present the algorithm ``Probabilistic Inference with Tabling and
Answer subsumption'' (PITA) that computes the probability of
queries by transforming a probabilistic program into a normal program
and then applying SLG resolution with answer subsumption.
PITA has been implemented in XSB and tested on six domains: two
with function symbols and four without.  The execution times are
compared with those of ProbLog, cplint and
CVE. PITA was almost always able to solve larger problems in a
shorter time, on domains with and without function symbols.},
  keywords = {Probabilistic Logic Programming, Possibilistic Logic Programming, Tabling, Answer Subsumption, Logic Programs with Annotated Disjunction, Program Transformation},
  doi = {10.1017/S1471068411000664},
  arxiv = {1110.0631},
  pages = {279--302},
  volume = {13},
  number = {Special Issue 02 - 25th Annual GULP Conference},
  scopus = {84874625061},
  isi = {000315867300007},
  url = {http://arxiv.org/pdf/1110.0631v1}
}
@article{BelRig12-IA-IJ,
  author = {Elena Bellodi and Fabrizio Riguzzi},
  title = {Experimentation of an Expectation Maximization Algorithm for Probabilistic Logic Programs},
  year = {2012},
  journal = {Intelligenza Artificiale},
  publisher = {IOS Press},
  copyright = {IOS Press},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/BelRig12-IA-IJ.pdf},
  abstract = {Statistical Relational Learning and Probabilistic Inductive Logic Programming are two emerging fields that use representation languages able to combine logic and probability. In the field of Logic Programming, the distribution semantics is one of the prominent approaches for representing uncertainty and underlies many languages such as ICL, PRISM, ProbLog and LPADs.
Learning the parameters for such languages requires an Expectation Maximization algorithm since their equivalent Bayesian networks contain hidden variables.
EMBLEM (EM over BDDs for probabilistic Logic programs Efficient Mining) is an EM algorithm for languages following the distribution semantics that computes expectations directly on the Binary Decision Diagrams that are built for inference.
In this paper we present experiments comparing EMBLEM with LeProbLog, Alchemy, CEM, RIB and LFI-ProbLog on six real world datasets. The results show that EMBLEM is able to solve problems on which the other systems fail and it often achieves significantly higher areas under the Precision Recall and the ROC curves in a similar time.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming, Expectation Maximization, Binary Decision Diagrams,
Logic Programs with Annotated Disjunctions
},
  volume = {8},
  number = {1},
  pages = {3--18},
  doi = {10.3233/IA-2012-0027}
}
@article{RigDiM12-ML-IJ,
  author = {Fabrizio Riguzzi and Di Mauro, Nicola},
  title = {Applying the {Information Bottleneck} to {Statistical Relational Learning}},
  year = {2012},
  journal = {Machine Learning},
  volume = {86},
  number = {1},
  pages = {89--114},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/RigDiM11-ML-IJ.pdf},
  doi = {10.1007/s10994-011-5247-6},
  publisher = {Springer},
  copyright = {Springer},
  address = {Heidelberg, Germany},
  abstract = {In this  paper we  propose to apply  the Information  Bottleneck (IB) approach  to the  sub-class of
Statistical Relational  Learning (SRL) languages that  are reducible to Bayesian  networks. When the
resulting networks involve hidden variables, learning these languages requires the use of techniques
for  learning from incomplete  data such  as the  Expectation Maximization  (EM) algorithm.
Recently, the IB approach was shown to be able to avoid some of the local maxima in which EM can get
trapped when learning with hidden variables.  Here  we present the algorithm Relational Information Bottleneck
(RIB)
that learns the parameters of SRL languages reducible
to Bayesian Networks.
 In particular, we present the specialization  of RIB to a  language belonging to the family of languages based on the distribution semantics, Logic Programs with  Annotated Disjunction (LPADs). This language is prototypical for such a family and its equivalent Bayesian networks contain hidden  variables. RIB is evaluated on the IMDB, Cora and artificial datasets and compared with LeProbLog, EM, Alchemy and PRISM.
The  experimental results show that  RIB has good performances especially when some logical atoms are unobserved.
Moreover, it is particularly suitable when learning from interpretations that share the same Herbrand base.},
  keywords = {Statistical Relational Learning, Probabilistic Inductive Logic Programming, Probabilistic Logic Programming,  Information Bottleneck,
Logic Programs with Annotated Disjunctions
}
}
@article{RigSwi11-ICLP11-IJ,
  author = {Fabrizio Riguzzi and Terrance Swift},
  title = {The {PITA} System: Tabling and Answer Subsumption for Reasoning under Uncertainty},
  journal = {Theory and Practice of Logic Programming, 27th International
Conference on Logic Programming (ICLP'11) Special Issue, Lexington, Kentucky
6-10 July 2011},
  editor = {John Gallagher and Michael Gelfond},
  year = {2011},
  volume = {11},
  number = {4--5},
  pages = {433--449},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  doi = {10.1017/S147106841100010X},
  arxiv = {1107.4747},
  url = {http://arxiv.org/pdf/1107.4747v1},
  http = {http://journals.cambridge.org/repo_A83y6WKy},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/RigSwi-ICLP11.pdf},
  abstract = {Many real world domains require the representation of a measure of
uncertainty.  The most common such representation is probability, and
the combination of probability with logic programs has given rise to
the field of Probabilistic Logic Programming (PLP), leading to
languages such as the Independent Choice Logic, Logic Programs with
Annotated Disjunctions (LPADs), Problog, PRISM and others. These languages
share a similar distribution semantics, and methods have been devised
to translate programs between these languages. 
The complexity of computing the probability of queries to these
general PLP programs is very high due to the need to combine the
probabilities of explanations that may not be exclusive.  As one
alternative, the PRISM system reduces the complexity of query
answering by restricting the form of programs it can evaluate.  As an
entirely different alternative, Possibilistic Logic Programs adopt a
simpler metric of uncertainty than probability.

Each of these approaches -- general PLP, restricted PLP, and
Possibilistic Logic Programming -- can be useful in different domains
depending on the form of uncertainty to be represented, on the form of
programs needed to model problems, and on the scale of the problems to
be solved.  In this paper, we show how the PITA system, which
originally supported the general PLP language of LPADs, can also
efficiently support restricted PLP and Possibilistic Logic Programs.
PITA relies on tabling with answer subsumption and consists of a
transformation along with an API for library functions that interface
with answer subsumption.  We show that, by adapting its transformation
and library functions, PITA can be parameterized to PITA(IND,EXC) 
which supports the restricted PLP of PRISM, including optimizations
that reduce non-discriminating arguments and the computation of
Viterbi paths.  Furthermore, we show PITA to be competitive with PRISM
for complex queries to Hidden Markov Model examples, and sometimes
much faster.
We further show how PITA can be parameterized to PITA(COUNT) which
computes the number of different explanations for a subgoal, and to
PITA(POSS) which scalably implements Possibilistic Logic Programming.
PITA is a supported package in version 3.3 of XSB.
},
  keywords = {Probabilistic Logic Programming, Possibilistic Logic Programming, Tabling, Answer Subsumption, Program Transformation}
}
@article{AlbGavLam11-IA-IJ,
  author = {Marco Alberti and Marco Gavanelli and Evelina Lamma and Fabrizio Riguzzi and Sergio Storari},
  title = {Learning specifications of interaction protocols and business processes and proving their properties},
  journal = {Intelligenza Artificiale},
  year = {2011},
  volume = {5},
  number = {1},
  pages = {71--75},
  month = feb,
  doi = {10.3233/IA-2011-0006},
  issn = {1724-8035},
  abstract = {In this paper, we overview our recent research
  activity concerning the induction of Logic Programming
  specifications, and the proof of their properties via Abductive
  Logic Programming. Both the inductive and abductive tools here
  briefly described have been applied to respectively learn and verify
  (properties of) interaction protocols in multi-agent systems, Web
  service choreographies, careflows and business processes.},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/AlbGavLam-IA08.pdf}
}
@article{GavRigMilCag10-ICLP10-IJ,
  author = {Marco Gavanelli and Fabrizio Riguzzi and Michela Milano and Paolo Cagnoli},
  title = {{Logic-Based} {Decision} {Support} for {Strategic} {Environmental} {Assessment}},
  year = {2010},
  editor = {Manuel Hermenegildo and Torsten Schaub},
  month = jul,
  journal = {Theory and Practice of Logic Programming, 26th Int'l.
Conference on Logic Programming (ICLP'10) Special Issue},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/GavRigMilCag-ICLP10.pdf},
  url = {http://journals.cambridge.org/action/displayIssue?jid=TLP&volumeId=10&seriesId=0&issueId=4-6},
  http = {http://journals.cambridge.org/repo_A78jDFM5},
  volume = {10},
  number = {4--6},
  publisher = {Cambridge University Press},
  copyright = {Cambridge University Press},
  abstract = {Strategic Environmental Assessment is a procedure aimed at
introducing systematic assessment of the environmental effects of
plans and programs. This procedure is based on the so called
coaxial matrices that define dependencies between plan activities
(infrastructures, plants, resource extractions, buildings, etc.)
and positive and negative environmental impacts, and dependencies
between these impacts and environmental receptors. Up to now, this
procedure is manually implemented by environmental experts for
checking the environmental effects of a given plan or program, but
it is never applied during the plan/program construction. A
decision support system, based on a clear logic semantics, would
be an invaluable tool not only in assessing a single, already
defined plan, but also during the planning process in order to
produce an optimized, environmentally assessed plan and to study
possible alternative scenarios. We propose two logic-based
approaches to the problem, one based on Constraint Logic
Programming and one on Probabilistic Logic Programming that could
be, in the future, conveniently merged to exploit the advantages
of both. We test the proposed approaches on a real energy plan and
we discuss their limitations and advantages.},
  keywords = {Strategic Environmental Assessment, Regional Planning, Constraint
Logic Programming, Probabilistic Logic Programming, Causality},
  doi = {10.1017/S1471068410000335},
  pages = {643--658},
  arxiv = {1007.3159}
}
@article{Rig10-FI-IJ,
  author = {Fabrizio Riguzzi},
  title = {{SLGAD} Resolution for Inference on {Logic Programs with Annotated Disjunctions}},
  journal = {Fundamenta Informaticae},
  abstract = {Logic Programs with Annotated Disjunctions (LPADs) allow to express 
probabilistic information in logic programming. The semantics of an LPAD is 
given in terms of well\--founded models of the normal logic programs obtained 
by selecting one disjunct from each ground LPAD clause. 

Inference on LPADs can be performed using either the system Ailog2, that was 
developed for the Independent Choice Logic, or SLDNFAD, an algorithm based on 
SLDNF.  However, both of these algorithms run the risk of going into infinite 
loops and of performing redundant computations.

In order to avoid these problems, we present SLGAD resolution that  computes 
the (conditional) probability of a ground query from a range\--restricted LPAD 
and is  based on  SLG resolution for normal logic programs. As SLG, it uses 
tabling to avoid some infinite loops and to avoid redundant computations.

The performances of SLGAD are evaluated on classical benchmarks for normal logic 
programs under the well\--founded semantics, namely a 2\--person game and the 
ancestor relation, and on a game of dice.

SLGAD is compared with  Ailog2 and  SLDNFAD on the problems in which they do 
not go into infinite loops, namely those that are described by a  modularly 
acyclic program.

On the 2\--person game and the ancestor relation, SLGAD is more expensive than 
SLDNFAD on problems where SLDNFAD succeeds but is faster than Ailog2 when the 
query is true in an exponential number of instances.

If the program requires the repeated computation of similar goals, as for the 
dice game, then SLGAD outperforms both Ailog2 and SLDNFAD.},
  keywords = {Probabilistic Logic Programming, Well-Founded Semantics, Logic Programs with Annotated Disjunctions, SLG Resolution},
  month = oct,
  volume = {102},
  number = {3--4},
  year = {2010},
  pages = {429--466},
  doi = {10.3233/FI-2010-392},
  publisher = {{IOS} Press},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/Rig10-FI-IJ.pdf},
  url = {http://iospress.metapress.com/content/h284771xj3rv48v1/?p=5a13bfb0811a4f8d88d661eb7cf8a649&pi=7},
  scopus = {2-s2.0-78650327867},
  isi = {WOS:000284311600008}
}
@article{Rig09-LJIGPL-IJ,
  author = {Fabrizio Riguzzi},
  title = {Extended Semantics and Inference for the {Independent Choice Logic}},
  journal = {Logic Journal of the IGPL},
  publisher = {Oxford University Press},
  volume = {17},
  number = {6},
  pages = {589--629},
  address = {Oxford, \UK},
  year = {2009},
  abstract = {The Independent Choice Logic (ICL) is a  language for expressing 
probabilistic information in logic programming that adopts a distribution 
semantics: an ICL theory defines a distribution over a set of possible worlds 
that are normal logic programs. The probability of a query is then given by the 
sum of the probabilities of worlds where the query is true.

The ICL semantics requires the theories to be acyclic. This is a strong 
limitation that rules out many interesting programs.
In this paper we present an extension of the ICL semantics that allows theories 
to be modularly acyclic.

Inference with ICL can be performed with the Cilog2 system that  computes 
explanations to queries and then  makes them mutually incompatible by means of 
an iterative algorithm.

We propose the system PICL (for Probabilistic inference with ICL) that computes 
the explanations to queries by means of a modification of SLDNF\--resolution 
and then makes them mutually incompatible by means of Binary Decision Diagrams.

PICL and Cilog2 are compared on problems that involve computing the probability 
of a connection between two nodes in biological graphs and social networks. 
PICL turned to be more efficient, handling larger networks/more complex queries 
in a shorter time than Cilog2. This is true both for marginal and for 
conditional queries.
},
  doi = {10.1093/jigpal/jzp025},
  url = {http://jigpal.oxfordjournals.org/cgi/reprint/jzp025?ijkey=picqzY6rpyU6emf&keytype=ref},
  http = {http://jigpal.oxfordjournals.org/cgi/content/abstract/jzp025?ijkey=picqzY6rpyU6emf&keytype=ref},
  keywords = {Probabilistic Logic Programming, Independent Choice Logic, Modularly acyclic programs, SLDNF-Resolution},
  copyright = {Fabrizio Riguzzi, exclusively licensed to Oxford University Press}
}
@article{CheLamMel09-TOPNOC-IJ,
  author = {Federico Chesani and Evelina Lamma and
Paola Mello and Marco Montali   and Fabrizio Riguzzi and Sergio
Storari},
  title = {Exploiting Inductive Logic Programming Techniques for Declarative 
Process Mining},
  journal = {LNCS Transactions on Petri Nets and Other Models of Concurrency, 
{ToPNoC} {II}},
  year = {2009},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  series = {Lecture Notes in Computer Science},
  volume = {5460},
  pages = {278--295},
  doi = {10.1007/978-3-642-00899-3_16},
  issn = {1867-7193},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/CheLamMel-TOPNOC09.pdf},
  url = {http://www.springerlink.com/content/c4j2k38675588759/},
  abstract = {In the last few years, there has been a growing interest in the
adoption of declarative paradigms for modeling and verifying process
models. These paradigms provide an abstract and human understandable
way of specifying constraints that must hold among activities
executions rather than focusing on a specific procedural solution.
Mining such declarative descriptions is still an open challenge. In
this paper, we present a logic-based approach for tackling this
problem.  It relies on Inductive Logic Programming techniques and,
in particular, on a modified version of the Inductive Constraint
Logic algorithm. We investigate how, by properly tuning the learning
algorithm, the approach can be adopted to mine models expressed in
the ConDec notation, a graphical language for the declarative
specification of business processes. Then, we sketch how such a
mining framework has been concretely  implemented as a ProM plug-in
called DecMiner. We finally discuss the effectiveness of the
approach by means of an example which shows the ability of the
language to model concurrent activities and of DecMiner to learn
such a model.},
  keywords = {Process Mining, Inductive Logic Programming, Declarative Process Languages},
  copyright = {Springer}
}
@article{StoRigLam09-IDA-IJ,
  author = {Sergio Storari and Fabrizio Riguzzi and Evelina Lamma},
  title = {Exploiting Association and Correlation Rules Parameters for Learning 
Bayesian Networks},
  journal = {Intelligent Data Analysis},
  year = {2009},
  pages = {689--701},
  publisher = {{IOS} Press},
  volume = {13},
  number = {5},
  address = {Amsterdam, \TheNetherlands},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/StoRigLam-IDA09.pdf},
  doi = {10.3233/IDA-2009-0388},
  url = {http://iospress.metapress.com/content/59661362p1418230/},
  abstract = {In data mining, association and correlation rules
are inferred from data in order to highlight  statistical dependencies among 
attributes. The metrics defined for evaluating these rules can be exploited to 
score relationships between attributes in Bayesian network learning. In this 
paper, we propose two novel methods for learning Bayesian networks from data 
that are
based on the K2 learning algorithm and that improve it by exploiting parameters
normally defined for association and correlation rules.
In particular, we propose the algorithms K2-Lift and K2-$X^{2}$, that exploit 
the lift metric and the $X^2$ metric respectively. We compare 
K2\--Lift, K2-$X^{2}$ with K2 on artificial data and on 
three test Bayesian networks. The experiments show that both our algorithms
improve K2 with respect to the quality of the
learned network. Moreover, a comparison of K2\--Lift and K2-$X^{2}$ with a 
genetic algorithm approach on two benchmark networks show superior results on 
one network and comparable results on the other.},
  keywords = {Bayesian Networks Learning, K2, Association Rules,  Correlation
  Rules},
  copyright = {Sergio Storari, Fabrizio Riguzzi and Evelina Lamma, exclusively licensed to {IOS} Press}
}
@article{Rig08-ML-IJ,
  author = {Fabrizio Riguzzi},
  title = {{ALLPAD}: Approximate Learning of Logic Programs with Annotated Disjunctions},
  journal = {Machine Learning},
  note = {The original publication is available at \url{http://www.springerlink.com}},
  year = {2008},
  volume = {70},
  number = {2-3},
  month = mar,
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/Rig-ML07.pdf},
  doi = {10.1007/s10994-007-5032-8},
  url = {http://dx.medra.org/10.1007/s10994-007-5032-8},
  abstract = {Logic Programs with Annotated Disjunctions (LPADs) provide a simple 
and elegant framework for representing probabilistic knowledge in logic programming. 
In this paper we consider the problem of learning ground LPADs starting from a set of 
interpretations annotated with their probability. We present the system ALLPAD for 
solving this problem. ALLPAD modifies the previous system LLPAD in order to tackle 
real world learning problems more effectively. This is achieved by looking for an 
approximate solution rather than a perfect one. A number of experiments have been 
performed on real and artificial data for evaluating ALLPAD, showing the feasibility 
of the approach.},
  keywords = {Inductive logic programming, Probabilistic logic programming, Statistical relational learning,
 Logic programs with annotated disjunctions},
  pages = {207--223},
  publisher = {Springer},
  address = {Heidelberg, \Germany},
  copyright = {Springer}
}
@article{LamMelNan06-TITB-IJ,
  author = {Evelina Lamma and Paola Mello and Annamaria Nanetti and
  Fabrizio Riguzzi and Sergio Storari and Gianfranco Valastro},
  title = {Artificial Intelligence Techniques for Monitoring Dangerous Infections},
  journal = {IEEE Transactions on Information Technology in Biomedicine},
  year = {2006},
  publisher = {IEEE Computer Society Press},
  address = {Washington, DC, \USA},
  volume = {10},
  number = {1},
  pages = {143--155},
  month = jan,
  issn = {1089-7771},
  doi = {10.1109/TITB.2005.855537},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamMelNanRigStoVal-TITB06.pdf},
  url = {http://dx.medra.org/10.1109/TITB.2005.855537},
  abstract = {
The monitoring and detection of nosocomial infections is a very
important problem arising in hospitals. A hospital-acquired or
nosocomial infection is a disease that develops after the
admission into the hospital and it is the consequence of a
treatment, not necessarily a surgical one, performed by the
medical staff. Nosocomial infections are dangerous because they
are caused by bacteria which have dangerous (critical) resistance
to antibiotics. This problem is very serious all over the world.
In Italy, actually almost 5-8\% of the patients admitted into
hospitals develop this kind of infection. In order to reduce this
figure, policies for controlling infections should be adopted by
medical practitioners. In order to support them in this complex
task, we have developed a system, called MERCURIO, capable of
managing different aspects of the problem. The objectives of this
system are the validation of microbiological data and the
creation of a real time epidemiological information system. The
system is useful for laboratory physicians, because it supports
them in the execution of the microbiological analyses; for
clinicians, because it supports them in the definition of the
prophylaxis, of the most suitable antibiotic therapy and in the
monitoring of patients' infections, and for epidemiologists,
because it allows them to identify outbreaks and to study
infection dynamics. In order to achieve these objectives we have
adopted expert system and data mining techniques. We have also
integrated a statistical module that monitors the diffusion of
nosocomial infections over time in the hospital and that strictly
interacts with the knowledge based module. Data mining techniques
have been used for improving the system knowledge base. The
knowledge discovery process is not antithetic, but complementary
to the one based on manual knowledge elicitation. In order to
verify the reliability of the tasks performed by MERCURIO and the
usefulness of the knowledge discovery approach, we performed a
test based on a dataset of real infection events. In the
validation task MERCURIO achieved an accuracy of 98.5\%, a
sensitivity of 98.5\% and a specificity of 99\%. In the therapy
suggestion task MERCURIO achieved very high Accuracy and
Specificity as well. The  executed test provided many insights to
experts too (we discovered some of their mistakes). The knowledge
discovery approach was very effective in validating part of
MERCURIO knowledge base and also in extending it with new
validation rules, confirmed by  interviewed microbiologists and
peculiar to the hospital laboratory under consideration.},
  keywords = {Microbiology,  Knowledge Based Systems, Decision Support Systems,
Data Mining, Classification},
  copyright = {IEEE}
}
@article{LamMelRig04-CJ-IJ,
  author = {Evelina  Lamma and Paola Mello and Fabrizio Riguzzi},
  title = {A System for Measuring Function Points from an {ER}-{DFD} Specification},
  journal = {The Computer Journal},
  abstract = {We present a tool for measuring the Function Point
software metric from the specification of a software system
expressed in the form of an Entity Relationship diagram plus a
Data Flow Diagram (ER-DFD).  First, the informal and general
Function Point counting rules are translated into rigorous rules
expressing properties of the ER-DFD.  Then, the rigorous rules
are translated into Prolog.  The measures given by the system on
a number of case studies are in accordance with those of human
experts.},
  publisher = {Oxford University Press},
  address = {Oxford, \UK},
  keywords = {Software Engineering, Software Metrics, Function Points},
  year = {2004},
  volume = {47},
  number = {3},
  pages = {358--372},
  month = may,
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/fun.pdf},
  http = {http://comjnl.oxfordjournals.org/cgi/reprint/47/3/358},
  issn = {0010-4620},
  doi = {10.1093/comjnl/47.3.358},
  copyright = {Evelina  Lamma, Paola Mello and Fabrizio Riguzzi, licensed exclusively to The British Computer Society}
}
@article{LamMelRig03-NGC-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Sergio Storari and Paola Mello and
   Annamaria Nanetti},
  title = {Discovering Validation Rules from Micro-biological Data},
  journal = {New Generation Computing},
  year = {2003},
  volume = {21},
  number = {2},
  pages = {123--134},
  publisher = {Ohmsha, Ltd. and Springer},
  address = {Tokyo, \Japan},
  month = feb,
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigStoMelNan-NGC03.pdf},
  http = {http://www.springerlink.com/content/b816tm18j5715810},
  doi = {10.1007/BF03037630},
  copyright = {Ohmsha, Ltd. and Springer},
  abstract = {A huge amount of data is daily collected from clinical
microbiology laboratories. These data concern the resistance or
susceptibility of bacteria to tested antibiotics. Almost all microbiology laboratories
follow standard antibiotic testing guidelines which suggest antibiotic test
execution methods and result interpretation and validation (among them,
those annually published by NCCLS). Guidelines basically specify, for
each species, the antibiotics to be tested, how to interpret the results of
tests and a list of exceptions regarding particular antibiotic test results.
Even if these standards are quite assessed, they do not consider
peculiar features of a given hospital laboratory, which possibly influence the
antimicrobial test results, and the further validation process.
In order to improve and better tailor the validation process, we have
applied knowledge discovery techniques, and data mining in particular,
to microbiological data with the purpose of discovering new validation
rules, not yet included in NCCLS guidelines, but considered plausible and
correct by interviewed experts. In particular, we applied the knowledge
discovery process in order to find (association) rules relating to each other
the susceptibility or resistance of a bacterium to different antibiotics.
This approach is not antithetic, but complementary to that based on
NCCLS rules: it proved very effective in validating some of them, and
also in extending that compendium. In this respect, the new discovered
knowledge has led microbiologists to be aware of new correlations among
some antimicrobial test results, which were previously unnoticed. Last
but not least, the new discovered rules, taking into account the history
of the considered laboratory, are better tailored to the hospital situation,
and this is very important since some resistances to antibiotics are specific
to particular, local hospital environments.},
  keywords = {Knowledge Discovery and Data mining, Microbiology, Knowledge Based Systems, Knowledge Elicitation}
}
@article{LamRigPer03-NGC-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Lu\'\i{}s Moniz Pereira},
  title = {Belief Revision via {Lamarckian} Evolution},
  journal = {New Generation Computing},
  abstract = {We present a system for performing belief revision in a
multi-agent environment.  The system is called GBR (Genetic
Belief Revisor) and it is based on a genetic algorithm. In this
setting, different individuals are exposed to different
experiences. This may happen because the world surrounding an
agent changes over time or because  we allow agents exploring
different parts of the world. The algorithm permits the exchange
of chromosomes from different agents and combines two different
evolution strategies, one based on Darwin's and the other  on
Lamarck's evolutionary theory. The algorithm therefore includes
also a Lamarckian operator that changes the memes of an agent in
order to improve their fitness. The operator is implemented by
means of a belief revision procedure that, by tracing logical
derivations, identifies the memes leading to contradiction.
Moreover, the algorithm comprises a special crossover mechanism
for memes in which a meme can be acquired from another agent only
if the other agent has ``accessed'' the meme, i.e. if an
application of the Lamarckian operator has read or modified the
meme.


Experiments have been performed on the $n$-queen problem and on a
problem of digital circuit diagnosis. In the case of the
$n$-queen problem, the addition of the Lamarckian operator in the
single agent case improves the fitness of the best solution. In
both cases the experiments show that the distribution of
constraints, even if it may lead to a reduction of the fitness of
the best solution, does not produce a significant reduction.},
  publisher = {Ohmsha, Ltd. and Springer},
  address = {Tokyo, \Japan},
  keywords = {Genetic Algorithms, Theory Revision},
  year = {2003},
  volume = {21},
  number = {3},
  month = aug,
  pages = {247--275},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigPer-NGC03.pdf},
  http = {http://www.springerlink.com/content/063764w6n3847825/},
  doi = {10.1007/BF03037475},
  copyright = {Ohmsha, Ltd. and Springer}
}
@article{CucMelPic01-IDA-IJ,
  author = {Rita Cucchiara and Paola Mello and Massimo Piccardi and Fabrizio Riguzzi},
  title = {An Application of Machine Learning and Statistics to Defect Detection},
  journal = {Intelligent Data Analysis},
  year = {2001},
  volume = {5},
  number = {2},
  pages = {151--164},
  publisher = {{IOS} Press},
  address = {Amsterdam, \TheNetherlands},
  month = mar # {/} # apr,
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/CucMelPic-IDA01.pdf},
  http = {http://iospress.metapress.com/content/rm7x4g5k9ncb3yck/?genre=article&issn=1088-467X&volume=5&issue=2&spage=151},
  copyright = {Rita Cucchiara, Paola Mello, Massimo Piccardi, Fabrizio Riguzzi, exclusively licensed to {IOS} Press}
}
@article{LamMaeMel01-ENTCS-IJ,
  author = {Evelina Lamma and Leonardo Maestrami and Paola Mello and Fabrizio Riguzzi
    and Sergio Storari},
  title = {Rule-based Programming for Building Expert Systems: a Comparison in the Microbiological Data Validation and Surveillance Domain},
  journal = {Electronic Notes in Theoretical Computer Science},
  volume = {59},
  number = {4},
  publisher = {Elsevier Science Publishers},
  editor = {Mark van den Brand and Rakesh Verma},
  year = {2001},
  address = {Amsterdam, \TheNetherlands},
  doi = {10.1016/S1571-0661(04)00299-3},
  url = {http://www.sciencedirect.com/science/article/pii/S1571066104002993},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamMaeMel-ENTCS01.pdf},
  month = sep,
  abstract = {In this work, we compare three rule-based programming tools used for building an
expert system for microbiological laboratory data validation and bacteria infections
monitoring. The first prototype of the system was implemented in KAPPA-PC. We
report on the implementation and performance by comparing KAPPA-PC with two
other more recent tools, namely JESS and ILOG JRULES. In order to test each
tool we realized three simple test applications capable to perform some tasks that
are peculiar of our expert system.},
  keywords = {Expert Systems, Knowledge-based Systems, Microbiology}
}
@article{LamPerRig00-ML-IJ,
  author = {Evelina Lamma and Fabrizio Riguzzi and Lu\'{i}s Moniz Pereira},
  title = {Strategies in Combined Learning via Logic Programs},
  journal = {Machine Learning},
  volume = {38},
  number = {1/2},
  year = {2000},
  month = jan # {/} # feb,
  pages = {63--87},
  keywords = {ILP Implementation,ILP Theory,Knowledge Representation,Negation},
  abstract = {We discuss the adoption of a three-valued setting for
inductive concept learning. Distinguishing between what is true, what
is false and what is unknown can be useful in situations where decisions
have to be taken on the basis of scarce, ambiguous, or downright contradictory
information. In a three-valued setting, we learn a definition for both
the target concept and its opposite, considering positive and negative
examples as instances of two disjoint classes. To this purpose, we
adopt Extended Logic Programs (ELP) under a Well-Founded Semantics
with explicit negation WFSX as the representation formalism for learning,
and show how ELPs can be used to specify combinations of strategies
in a declarative way also coping with contradiction and exceptions.
Explicit negation is used to represent the opposite concept, while
default negation is used to ensure consistency and to handle exceptions
to general rules. Exceptions are represented by examples covered by
the definition for a concept that belong to the training set for the
opposite concept.

Standard Inductive Logic Programming techniques are employed to learn
the concept and its opposite. Depending on the adopted technique, we
can learn the most general or the least general definition. Thus, four
epistemological varieties occur, resulting from the combination of
most general and least general solutions for the positive and negative
concept. We discuss the factors that should be taken into account when
choosing and strategically combining the generality levels for positive
and negative concepts.

In the paper, we also handle the issue of strategic combination of
possibly contradictory learnt definitions of a predicate and its explicit
negation.

All in all, we show that extended logic programs under well-founded
semantics with explicit negation add expressivity to learning tasks,
and allow the tackling of a number of representation and strategic
issues in a principled way.

Our techniques have been implemented and examples run on a state-of-the-art
logic programming system with tabling which implements WFSX.},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamRigPer-ML00.pdf},
  publisher = {Springer Netherlands},
  address = {Dordrecht, \TheNetherlands},
  doi = {10.1023/A:1007681906490},
  http = {http://link.springer.com/article/10.1023%2FA%3A1007681906490},
  copyright = {Springer Netherlands},
  note = {The original publication is available at \url{http://www.springerlink.com}}
}
@article{KakRig00-NGC-IJ,
  author = {Antonis C. Kakas and Fabrizio Riguzzi},
  title = {Abductive Concept Learning},
  journal = {New Generation Computing},
  volume = {18},
  number = {3},
  year = {2000},
  pages = {243--294},
  keywords = {Abduction, Integrity Constraints, Multiple Predicate Learning},
  address = {Tokyo, \Japan},
  month = may,
  publisher = {Ohmsha, Ltd. and Springer},
  abstract = {We investigate how abduction and induction can be integrated into a common
learning framework. In particular, we consider an extension of Inductive
Logic Programming (ILP) for the case in which both the background and the
target theories are abductive logic programs and where an abductive notion
of entailment is used as the basic coverage relation for learning. This extended
learning framework has been called Abductive Concept Learning (ACL). In
this framework, it is possible to learn with incomplete background
information about the training examples by exploiting the hypothetical
reasoning of abduction. We also study how the ACL framework can be
used as a basis for multiple predicate learning.

An algorithm for ACL is developed by suitably extending the top-down ILP
method: the deductive proof procedure of Logic Programming is replaced by
an abductive proof procedure for Abductive Logic Programming. This
algorithm also incorporates a phase for learning integrity constraints by
suitably employing a system that learns from interpretations like ICL. The
framework of ACL thus integrates the two ILP settings of explanatory
(predictive) learning and confirmatory (descriptive) learning. The above
algorithm has been implemented into a system also called ACL.\footnote{The learning systems developed in this work together
with sample experimental data can be found at the following
address: \texttt{http://www-lia.deis.unibo.it/Software/ACL/}} Several
experiments have been performed that show the effectiveness of the ACL
framework in learning from incomplete data and its appropriate use for
multiple predicate learning.},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/KakRIg-NGC00.pdf},
  http = {http://link.springer.com/article/10.1007%2FBF03037531},
  doi = {10.1007/BF03037531},
  copyright = {Ohmsha, Ltd. and Springer}
}
@article{LamMelMil99-InfSciences99-IJ,
  author = {Evelina Lamma and Paola Mello and Michela Milano and Fabrizio Riguzzi},
  title = {Integrating Induction and Abduction in Logic
Programming},
  journal = {Information Sciences},
  pages = {25--54},
  volume = {116},
  number = {1},
  year = {1999},
  month = may,
  abstract = {We propose an approach for the integration of abduction and induction in
Logic Programming. We define an Abductive Learning Problem as an
extended Inductive Logic Programming problem where both the
background and target theories are abductive theories and where
abductive derivability is used as the coverage relation instead
of deductive derivability. The two main benefits of this
integration are the possibility of learning in presence of
incomplete knowledge and the increased expressive power of the
background and target theories.  We present the system LAP
(Learning Abductive Programs) that is able to solve this extended
learning problem and we describe, by means of examples, four
different learning tasks that can be performed by the system:
learning from incomplete knowledge, learning rules with
exceptions, learning from integrity constraints and learning
recursive predicates.},
  keywords = {Abduction, Negation, Integrity Constraints},
  publisher = {Elsevier Science},
  address = {Amsterdam, \TheNetherlands},
  pdf = {http://ds.ing.unife.it/~friguzzi/Papers/LamMelMil-IS99.pdf},
  doi = {10.1016/S0020-0255(98)10092-0},
  url = {http://www.sciencedirect.com/science/article/pii/S0020025598100920},
  copyright = {Elsevier}
}

This file was generated by bibtex2html 1.96.