@article{18536,
  title    = {Credit Card Fraud Detection: A Realistic Modeling and a Novel Learning Strategy},
  journal  = {IEEE Transactions on Neural Networks and Learning Systems},
  year     = {2018},
  pages    = {1--14},
  keywords = {Amplitude modulation, Area measurement, Companies, Concept drift, Credit card fraud detection, Credit cards, learning in nonstationary environments, Learning systems, Tools, Training, unbalanced classification},
  issn     = {2162-237X},
  doi      = {10.1109/TNNLS.2017.2736643},
  author   = {Dal Pozzolo, Andrea and Boracchi, Giacomo and Caelen, Olivier and Alippi, Cesare and Bontempi, Gianluca}
}

@inproceedings{18514,
  title     = {{CCM}: Controlling the Change Magnitude in High Dimensional Data},
  booktitle = {Proceedings of the 2nd {INNS} Conference on Big Data 2016 ({INNS} Big Data 2016)},
  year      = {2016},
  month     = oct,
  pages     = {1--10},
  address   = {Thessaloniki, Greece},
  abstract  = {Change-detection algorithms are often tested on real-world datasets where changes are synthetically introduced. While this common practice allows generating multiple datasets to obtain stable performance measures, it is often quite arbitrary since the change magnitude is seldom controlled. Thus, experiments -- in particular those on multivariate and high-dimensional data. We here present a rigorous framework for introducing changes having a controlled magnitude in multivariate datasets. In particular, we introduce changes by directly roto-translating the data, and we measure the change magnitude by the symmetric Kullback-Leibler divergence between pre- and post-change distributions. We present an iterative algorithm that identifies the roto-translation parameters yielding the desired change magnitude, and we prove its convergence analytically. We also illustrate our MATLAB framework that introduces changes having a controlled magnitude in real-world datasets, which is made publicly available for download.},
  author    = {Alippi, Cesare and Boracchi, Giacomo and Carrera, Diego}
}

@inproceedings{18447,
  title     = {Change Detection in Multivariate Datastreams: Likelihood and Detectability Loss},
  booktitle = {25th International Joint Conference on Artificial Intelligence ({IJCAI}-16)},
  year      = {2016},
  month     = jul,
  address   = {New York, USA},
  abstract  = {We address the problem of detecting changes in multivariate datastreams, and we investigate the intrinsic difficulty that change-detection methods have to face when the data-dimension scales. In particular, we consider the general approach that detects changes by comparing the distribution of the log-likelihood of the datastream over different time windows. Despite the fact that this approach constitutes the frame for several change-detection methods, its effectiveness when the dimension of data scales has never been investigated, which is indeed the goal of our paper. We show that the magnitude of the change can be naturally measured by the symmetric Kullback-Leibler divergence between the pre- and post-change distributions, and that the detectability of a change of a given magnitude worsens when the data-dimension increases. This structural problem, which we refer to as detectability loss, is due to the linear relationship existing between the variance of the log-likelihood and the data dimension, and reveals to be harmful even at low data-dimensions (say, 10). We analytically derive the detectability loss on Gaussian-distributed datastreams, and empirically demonstrate that this problem holds also on real-world datasets.},
  author    = {Alippi, Cesare and Boracchi, Giacomo and Carrera, Diego and Roveri, Manuel}
}

@article{18543,
  title         = {Model Complexity, Regularization, and Sparsity [Guest Editorial]},
  journal       = {IEEE Computational Intelligence Magazine},
  year          = {2016},
  keywords      = {Adaptation models, Computational complexity, machine learning, Sparse matrices, Special issues and sections},
  doi           = {10.1109/MCI.2016.2602071},
  internal-note = {journal inferred from DOI prefix 10.1109/MCI -- verify against the published record},
  author        = {Alippi, Cesare and Boracchi, Giacomo and Wohlberg, Brendt}
}