@inproceedings{dcfc63ef6c4144708dbc6bad0745947a,
title = "Representation learning for cross-modality classification",
abstract = "Differences in scanning parameters or modalities can complicate image analysis based on supervised classification. This paper presents two representation learning approaches, based on autoencoders, that address this problem by learning representations that are similar across domains. Both approaches use, next to the data representation objective, a similarity objective to minimise the difference between representations of corresponding patches from each domain. We evaluated the methods in transfer learning experiments on multi-modal brain MRI data and on synthetic data. After transforming training and test data from different modalities to the common representations learned by our methods, we trained classifiers for each of pair of modalities. We found that adding the similarity term to the standard objective can produce representations that are more similar and can give a higher accuracy in these cross-modality classification experiments.",
author = "Tulder, {Gijs van} and {de Bruijne}, Marleen",
year = "2017",
doi = "10.1007/978-3-319-61188-4_12",
language = "English",
isbn = "978-3-319-61187-7",
series = "Lecture notes in computer science",
publisher = "Springer",
pages = "126--136",
editor = "Henning M{\"u}ller and Kelm, {B. Michael} and Tal Arbel and Weidong Cai and Cardoso, {M. Jorge} and Georg Langs and Bjoern Menze and Dimitris Metaxas and Albert Montillo and Wells, {William M.} and Shaoting Zhang and Chung, {Albert C. S.} and Mark Jenkinson and Annemie Ribbens",
booktitle = "Medical Computer Vision and Bayesian and Graphical Models for Biomedical Imaging",
note = "MICCAI International Workshop on Medical Computer Vision 2016 : algorithms for big data, MICCAI-MCV 2016 ; Conference date: 21-10-2016 Through 21-10-2016",
}
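
The abstract above describes autoencoders trained with a reconstruction objective plus a similarity term that pulls the representations of corresponding patches from two modalities towards each other. Below is a minimal, hypothetical PyTorch sketch of that idea, not the authors' code: the flattened patch size (225), code dimension, mean-squared-error form of the similarity term, and the similarity weight are illustrative assumptions, and the actual architectures and objectives in the paper may differ.

# Hypothetical sketch (not the authors' implementation): two per-modality
# autoencoders trained with a reconstruction loss plus a similarity penalty
# between the codes of corresponding patches. Dimensions and weights are
# illustrative assumptions.
import torch
import torch.nn as nn

class PatchAutoencoder(nn.Module):
    """One autoencoder per modality, operating on flattened image patches."""
    def __init__(self, patch_dim=225, code_dim=64):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(patch_dim, code_dim), nn.Sigmoid())
        self.decoder = nn.Linear(code_dim, patch_dim)

    def forward(self, x):
        code = self.encoder(x)
        return code, self.decoder(code)

def joint_loss(ae_a, ae_b, patches_a, patches_b, similarity_weight=1.0):
    """Reconstruction loss for both modalities plus a similarity term
    that penalises differences between the codes of corresponding patches."""
    code_a, recon_a = ae_a(patches_a)
    code_b, recon_b = ae_b(patches_b)
    mse = nn.functional.mse_loss
    reconstruction = mse(recon_a, patches_a) + mse(recon_b, patches_b)
    similarity = mse(code_a, code_b)  # encourage similar representations across modalities
    return reconstruction + similarity_weight * similarity

# Usage example with random stand-in data for two corresponding patch batches
ae_a, ae_b = PatchAutoencoder(), PatchAutoencoder()
params = list(ae_a.parameters()) + list(ae_b.parameters())
optimizer = torch.optim.Adam(params, lr=1e-3)
patches_a = torch.rand(32, 225)  # corresponding patches, modality A
patches_b = torch.rand(32, 225)  # corresponding patches, modality B
loss = joint_loss(ae_a, ae_b, patches_a, patches_b)
optimizer.zero_grad()
loss.backward()
optimizer.step()

After training, patches from either modality would be mapped through the corresponding encoder to the shared-style representation before a classifier is trained, which is the cross-modality transfer setting the abstract evaluates.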