2024
Eulig, Elias; Jäger, Fabian; Maier, Joscha; Ommer, Björn; Kachelrieß, Marc
Reconstructing and analyzing the invariances of low-dose CT image denoising networks Journal Article
In: Medical Physics, 2024.
@article{eulig2024invariances,
title = {Reconstructing and analyzing the invariances of low-dose CT image denoising networks},
author = {Elias Eulig and Fabian Jäger and Joscha Maier and Björn Ommer and Marc Kachelrieß},
url = {https://aapm.onlinelibrary.wiley.com/doi/10.1002/mp.17413},
doi = {10.1002/mp.17413},
year = {2024},
date = {2024-09-30},
urldate = {2024-09-30},
journal = {Medical Physics},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Eulig, Elias; Ommer, Björn; Kachelrieß, Marc
Benchmarking deep learning-based low-dose CT image denoising algorithms Journal Article
In: Medical Physics, 2024.
@article{eulig2024benchmarking,
title = {Benchmarking deep learning-based low-dose CT image denoising algorithms},
author = {Elias Eulig and Björn Ommer and Marc Kachelrieß},
url = {https://aapm.onlinelibrary.wiley.com/doi/abs/10.1002/mp.17379},
doi = {10.1002/mp.17379},
year = {2024},
date = {2024-09-17},
journal = {Medical Physics},
abstract = {Abstract Background Long-lasting efforts have been made to reduce radiation dose and thus the potential radiation risk to the patient for computed tomography (CT) acquisitions without severe deterioration of image quality. To this end, various techniques have been employed over the years including iterative reconstruction methods and noise reduction algorithms. Purpose Recently, deep learning-based methods for noise reduction became increasingly popular and a multitude of papers claim ever improving performance both quantitatively and qualitatively. However, the lack of a standardized benchmark setup and inconsistencies in experimental design across studies hinder the verifiability and reproducibility of reported results. Methods In this study, we propose a benchmark setup to overcome those flaws and improve reproducibility and verifiability of experimental results in the field. We perform a comprehensive and fair evaluation of several state-of-the-art methods using this standardized setup. Results Our evaluation reveals that most deep learning-based methods show statistically similar performance, and improvements over the past years have been marginal at best. Conclusions This study highlights the need for a more rigorous and fair evaluation of novel deep learning-based methods for low-dose CT image denoising. Our benchmark setup is a first and important step towards this direction and can be used by future researchers to evaluate their algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hu, Vincent; Wu, Di; Asano, Yuki; Mettes, Pascal; Fernando, Basura; Ommer, Björn; Snoek, Cees
Flow Matching for Conditional Text Generation in a Few Sampling Steps Proceedings Article
In: Graham, Yvette; Purver, Matthew (Ed.): Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 380–392, Association for Computational Linguistics, St. Julian's, Malta, 2024.
@inproceedings{hu-etal-2024-flow,
title = {Flow Matching for Conditional Text Generation in a Few Sampling Steps},
author = {Vincent Hu and Di Wu and Yuki Asano and Pascal Mettes and Basura Fernando and Björn Ommer and Cees Snoek},
editor = {Yvette Graham and Matthew Purver},
url = {https://aclanthology.org/2024.eacl-short.33},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
booktitle = {Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages = {380--392},
publisher = {Association for Computational Linguistics},
address = {St. Julian's, Malta},
abstract = {Diffusion models are a promising tool for high-quality text generation. However, current models face multiple drawbacks including slow sampling, noise schedule sensitivity, and misalignment between the training and sampling stages. In this paper, we introduce FlowSeq, which bypasses all current drawbacks by leveraging flow matching for conditional text generation. FlowSeq can generate text in a few steps by training with a novel anchor loss, alleviating the need for expensive hyperparameter optimization of the noise schedule prevalent in diffusion models. We extensively evaluate our proposed method and show competitive performance in tasks such as question generation, open-domain dialogue, and paraphrasing tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Po, Ryan; Yifan, Wang; Golyanik, Vladislav; Aberman, Kfir; Barron, Jonathan T.; Bermano, Amit H.; Chan, Eric Ryan; Dekel, Tali; Holynski, Aleksander; Kanazawa, Angjoo; Liu, C. Karen; Liu, Lingjie; Mildenhall, Ben; Nießner, Matthias; Ommer, Björn; Theobalt, Christian; Wonka, Peter; Wetzstein, Gordon
State of the Art on Diffusion Models for Visual Computing Proceedings Article
In: Computer Graphics Forum 43, 2024.
@inproceedings{po2023state,
  title     = {State of the Art on Diffusion Models for Visual Computing},
  author    = {Ryan Po and Wang Yifan and Vladislav Golyanik and Kfir Aberman and Jonathan T. Barron and Amit H. Bermano and Eric Ryan Chan and Tali Dekel and Aleksander Holynski and Angjoo Kanazawa and C. Karen Liu and Lingjie Liu and Ben Mildenhall and Matthias Nießner and Björn Ommer and Christian Theobalt and Peter Wonka and Gordon Wetzstein},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2023-01-01},
  booktitle = {Computer Graphics Forum 43},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Eulig, Elias; Maier, Joscha; Ommer, Björn; Kachelrieß, Marc
May denoising remove structures? How to reconstruct invariances of CT denoising algorithms Proceedings Article
In: Fahrig, Rebecca; Sabol, John M.; Li, Ke (Ed.): Medical Imaging 2024: Physics of Medical Imaging, International Society for Optics and Photonics SPIE, 2024.
@inproceedings{10.1117/12.3005952,
  title        = {May denoising remove structures? How to reconstruct invariances of CT denoising algorithms},
  author       = {Elias Eulig and Joscha Maier and Björn Ommer and Marc Kachelrieß},
  editor       = {Rebecca Fahrig and John M. Sabol and Ke Li},
  url          = {https://doi.org/10.1117/12.3005952},
  doi          = {10.1117/12.3005952},
  year         = {2024},
  date         = {2024-01-01},
  urldate      = {2024-01-01},
  booktitle    = {Medical Imaging 2024: Physics of Medical Imaging},
  volume       = {12925},
  publisher    = {SPIE},
  organization = {International Society for Optics and Photonics},
  abstract     = {Many methods have been developed to reduce radiation dose in computed tomography (CT) scans without sacrificing image quality. Recently, deep learning-based methods have shown promising results on the task of CT image denoising. However, they remain difficult to interpret, and thus safety concerns have been raised. In this work we develop a method to reconstruct the invariances of arbitrary denoising methods with an approach inspired by the optimization schemes commonly used to generate adversarial examples. We apply our method to one proof-of-principle algorithm as well as to two previously proposed denoising networks and show that it can successfully reconstruct their invariances.},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Stracke, Nick; Baumann, Stefan Andreas; Susskind, Joshua M.; Bautista, Miguel Angel; Ommer, Björn
CTRLorALTer: Conditional LoRAdapter for Efficient 0-Shot Control & Altering of T2I Models Proceedings Article
In: Proceedings of the European Conference on Computer Vision (ECCV), 2024.
@inproceedings{stracke2024ctrloralterconditionalloradapterefficient,
title = {CTRLorALTer: Conditional LoRAdapter for Efficient 0-Shot Control \& Altering of T2I Models},
author = {Nick Stracke and Stefan Andreas Baumann and Joshua M. Susskind and Miguel Angel Bautista and Björn Ommer},
url = {https://compvis.github.io/LoRAdapter/
https://arxiv.org/abs/2405.07913},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hu, Vincent Tao; Baumann, Stefan Andreas; Gui, Ming; Grebenkova, Olga; Ma, Pingchuan; Fischer, Johannes; Ommer, Björn
ZigMa: A DiT-style Zigzag Mamba Diffusion Model Proceedings Article
In: Proceedings of the European Conference on Computer Vision (ECCV), 2024.
@inproceedings{hu2024zigmaditstylezigzagmamba,
  title     = {ZigMa: A DiT-style Zigzag Mamba Diffusion Model},
  author    = {Vincent Tao Hu and Stefan Andreas Baumann and Ming Gui and Olga Grebenkova and Pingchuan Ma and Johannes Fischer and Björn Ommer},
  url       = {https://taohu.me/zigma/
https://arxiv.org/abs/2403.13802},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fischer, Johannes S.; Gui, Ming; Ma, Pingchuan; Stracke, Nick; Baumann, Stefan A.; Ommer, Björn
Boosting Latent Diffusion with Flow Matching Proceedings Article
In: Proceedings of the European Conference on Computer Vision (ECCV), 2024.
@inproceedings{fischer2024boostinglatentdiffusionflow,
  title     = {Boosting Latent Diffusion with Flow Matching},
  author    = {Johannes S. Fischer and Ming Gui and Pingchuan Ma and Nick Stracke and Stefan A. Baumann and Björn Ommer},
  url       = {https://compvis.github.io/fm-boosting/
https://arxiv.org/abs/2312.07360},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-01-01},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kotovenko, Dima; Grebenkova, Olga; Sarafianos, Nikolaos; Paliwal, Avinash; Ma, Pingchuan; Poursaeed, Omid; Mohan, Sreyas; Fan, Yuchen; Li, Yilei; Ranjan, Rakesh; Ommer, Björn
WaSt-3D: Wasserstein-2 Distance for Scene-to-Scene Stylization on 3D Gaussians Proceedings Article
In: Proceedings of the European Conference on Computer Vision (ECCV), 2024.
@inproceedings{kotovenko2024wast3d,
title = {WaSt-3D: Wasserstein-2 Distance for Scene-to-Scene Stylization on 3D Gaussians},
author = {Dima Kotovenko and Olga Grebenkova and Nikolaos Sarafianos and Avinash Paliwal and Pingchuan Ma and Omid Poursaeed and Sreyas Mohan and Yuchen Fan and Yilei Li and Rakesh Ranjan and Björn Ommer},
url = {https://compvis.github.io/wast3d/},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Kotovenko, Dmytro; Ma, Pingchuan; Milbich, Timo; Ommer, Björn
Cross-Image-Attention for Conditional Embeddings in Deep Metric Learning Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11070-11081, 2023.
@inproceedings{Kotovenko_2023_CVPR,
title = {Cross-Image-Attention for Conditional Embeddings in Deep Metric Learning},
author = {Dmytro Kotovenko and Pingchuan Ma and Timo Milbich and Björn Ommer},
url = {https://openaccess.thecvf.com/content/CVPR2023/papers/Kotovenko_Cross-Image-Attention_for_Conditional_Embeddings_in_Deep_Metric_Learning_CVPR_2023_paper.pdf},
year = {2023},
date = {2023-06-01},
urldate = {2023-06-01},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {11070--11081},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Farshad, Azade; Yeganeh, Yousef; Chi, Yu; Shen, Chengzhi; Ommer, Björn; Navab, Nassir
SceneGenie: Scene Graph Guided Diffusion Models for Image Synthesis Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2023.
@inproceedings{farshad2023scenegenie,
  title     = {SceneGenie: Scene Graph Guided Diffusion Models for Image Synthesis},
  author    = {Azade Farshad and Yousef Yeganeh and Yu Chi and Chengzhi Shen and Björn Ommer and Nassir Navab},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bell, Peter; Ommer, Björn
Measuring Art, Counting Pixels? The Collaboration of Art History and Computer Vision Oscillates Between Quantitative and Hermeneutic Methods Book Chapter
In: Schweiker, Marcel; Hass, Joachim; Novokhatko, Anna; Halbleib, Roxana (Ed.): Measurement and Understanding in Science and Humanities: Interdisciplinary Approaches, pp. 191–200, Springer Fachmedien Wiesbaden, Wiesbaden, 2023, ISBN: 978-3-658-36974-3.
@inbook{Bell2023,
title = {Measuring Art, Counting Pixels? The Collaboration of Art History and Computer Vision Oscillates Between Quantitative and Hermeneutic Methods},
author = {Peter Bell and Björn Ommer},
editor = {Marcel Schweiker and Joachim Hass and Anna Novokhatko and Roxana Halbleib},
url = {https://doi.org/10.1007/978-3-658-36974-3_15},
doi = {10.1007/978-3-658-36974-3_15},
isbn = {978-3-658-36974-3},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {Measurement and Understanding in Science and Humanities: Interdisciplinary Approaches},
pages = {191--200},
publisher = {Springer Fachmedien Wiesbaden},
address = {Wiesbaden},
abstract = {The project "Artificial and Artistic Vision. Computer Vision and Art History in Practical-Methodical Cooperation" is interdisciplinary by definition and also in its personnel composition and combines the humanities, engineering, and natural sciences. Together, prototypes and methodological approaches to an automatic vision that assists art history are being developed in the form of basic research.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
2022
Milbich, Timo; Roth, Karsten; Brattoli, Biagio; Ommer, Björn
Sharing Matters for Generalization in Deep Metric Learning Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2022.
@article{6389,
  title     = {Sharing Matters for Generalization in Deep Metric Learning},
  author    = {Timo Milbich and Karsten Roth and Biagio Brattoli and Björn Ommer},
  url       = {https://arxiv.org/abs/2004.05582},
  doi       = {10.1109/TPAMI.2020.3009620},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rombach, Robin; Blattmann, Andreas; Lorenz, Dominik; Esser, Patrick; Ommer, Björn
High-Resolution Image Synthesis with Latent Diffusion Models Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2022.
@conference{rombach2022highresolution,
title = {High-Resolution Image Synthesis with Latent Diffusion Models},
author = {Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer},
url = {https://ommer-lab.com/research/latent-diffusion-models/
https://github.com/CompVis/latent-diffusion
https://arxiv.org/abs/2112.10752},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Gangadharan, Vijayan; Zheng, Hongwei; Taberner, Francisco J.; Landry, Jonathan; Nees, Timo A.; Pistolic, Jelena; Agarwal, Nitin; Männich, Deepitha; Benes, Vladimir; Helmstaedter, Moritz; Ommer, Björn; Lechner, Stefan G.; Kuner, Thomas; Kuner, Rohini
Neuropathic pain caused by miswiring and abnormal end organ targeting Journal Article
In: Nature, vol. 606, pp. 137–145, 2022.
@article{gangadharan2022neuropathic,
title = {Neuropathic pain caused by miswiring and abnormal end organ targeting},
author = {Vijayan Gangadharan and Hongwei Zheng and Francisco J. Taberner and Jonathan Landry and Timo A. Nees and Jelena Pistolic and Nitin Agarwal and Deepitha Männich and Vladimir Benes and Moritz Helmstaedter and Björn Ommer and Stefan G. Lechner and Thomas Kuner and Rohini Kuner},
url = {https://www.nature.com/articles/s41586-022-04777-z},
doi = {10.1038/s41586-022-04777-z},
year = {2022},
date = {2022-01-01},
urldate = {2022-05-25},
journal = {Nature},
volume = {606},
pages = {137--145},
abstract = {Nerve injury leads to chronic pain and exaggerated sensitivity to gentle touch (allodynia) as well as a loss of sensation in the areas in which injured and non-injured nerves come together. The mechanisms that disambiguate these mixed and paradoxical symptoms are unknown. Here we longitudinally and non-invasively imaged genetically labelled populations of fibres that sense noxious stimuli (nociceptors) and gentle touch (low-threshold afferents) peripherally in the skin for longer than 10 months after nerve injury, while simultaneously tracking pain-related behaviour in the same mice. Fully denervated areas of skin initially lost sensation, gradually recovered normal sensitivity and developed marked allodynia and aversion to gentle touch several months after injury. This reinnervation-induced neuropathic pain involved nociceptors that sprouted into denervated territories precisely reproducing the initial pattern of innervation, were guided by blood vessels and showed irregular terminal connectivity in the skin and lowered activation thresholds mimicking low-threshold afferents. By contrast, low-threshold afferents—which normally mediate touch sensation as well as allodynia in intact nerve territories after injury—did not reinnervate, leading to an aberrant innervation of tactile end organs such as Meissner corpuscles with nociceptors alone. Genetic ablation of nociceptors fully abrogated reinnervation allodynia. Our results thus reveal the emergence of a form of chronic neuropathic pain that is driven by structural plasticity, abnormal terminal connectivity and malfunction of nociceptors during reinnervation, and provide a mechanistic framework for the paradoxical sensory manifestations that are observed clinically and can impose a heavy burden on patients.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wright, Matthias; Ommer, Björn
ArtFID: Quantitative Evaluation of Neural Style Transfer Conference
Proceedings of the German Conference on Pattern Recognition (GCPR '22) (Oral), 2022.
@conference{Wright2022,
  title     = {ArtFID: Quantitative Evaluation of Neural Style Transfer},
  author    = {Matthias Wright and Björn Ommer},
  url       = {https://github.com/matthias-wright/art-fid
https://arxiv.org/abs/2207.12280},
  doi       = {10.48550/arXiv.2207.12280},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {Proceedings of the German Conference on Pattern Recognition (GCPR '22) (Oral)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Rombach, Robin; Blattmann, Andreas; Ommer, Björn
Text-Guided Synthesis of Artistic Images with Retrieval-Augmented Diffusion Models Conference
Proceedings of the European Conference on Computer Vision (ECCV) Workshop on Visart, 2022.
@conference{rombach2022textguided,
title = {Text-Guided Synthesis of Artistic Images with Retrieval-Augmented Diffusion Models},
author = {Robin Rombach and Andreas Blattmann and Björn Ommer},
url = {https://arxiv.org/abs/2207.13038},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) Workshop on Visart},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Blattmann, Andreas; Rombach, Robin; Oktay, Kaan; Ommer, Björn
Retrieval-Augmented Diffusion Models Conference
Neural Information Processing Systems (NeurIPS), 2022.
@conference{blattmann2022retrievalaugmented,
title = {Retrieval-Augmented Diffusion Models},
author = {Andreas Blattmann and Robin Rombach and Kaan Oktay and Björn Ommer},
url = {https://arxiv.org/abs/2204.11824},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Neural Information Processing Systems (NeurIPS)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Eulig, Elias; Ommer, Björn; Kachelrieß, Marc
Reconstructing invariances of CT image denoising networks using invertible neural networks Proceedings Article
In: Stayman, Joseph Webster (Ed.): 7th International Conference on Image Formation in X-Ray Computed Tomography, pp. 123040S, International Society for Optics and Photonics SPIE, 2022.
@inproceedings{10.1117/12.2647170,
  title        = {Reconstructing invariances of CT image denoising networks using invertible neural networks},
  author       = {Elias Eulig and Björn Ommer and Marc Kachelrieß},
  editor       = {Joseph Webster Stayman},
  url          = {https://doi.org/10.1117/12.2647170},
  doi          = {10.1117/12.2647170},
  year         = {2022},
  date         = {2022-01-01},
  urldate      = {2022-01-01},
  booktitle    = {7th International Conference on Image Formation in X-Ray Computed Tomography},
  volume       = {12304},
  pages        = {123040S},
  publisher    = {SPIE},
  organization = {International Society for Optics and Photonics},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
2021
Milbich, Timo; Roth, Karsten; Sinha, Samarth; Schmidt, Ludwig; Ghassemi, Marzyeh; Ommer, Björn
Characterizing Generalization under Out-Of-Distribution Shifts in Deep Metric Learning Conference
Neural Information Processing Systems (NeurIPS), 2021.
@conference{Milbich2021,
  title     = {Characterizing Generalization under Out-Of-Distribution Shifts in Deep Metric Learning},
  author    = {Timo Milbich and Karsten Roth and Samarth Sinha and Ludwig Schmidt and Marzyeh Ghassemi and Björn Ommer},
  url       = {https://github.com/CompVis/Characterizing_Generalization_in_DML
https://arxiv.org/abs/2107.09562},
  year      = {2021},
  date      = {2021-12-10},
  urldate   = {2021-12-10},
  booktitle = {Neural Information Processing Systems (NeurIPS)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Esser, Patrick; Rombach, Robin; Blattmann, Andreas; Ommer, Björn
ImageBART: Bidirectional Context with Multinomial Diffusion for Autoregressive Image Synthesis Conference
Neural Information Processing Systems (NeurIPS), 2021.
@conference{esser2021imagebart,
title = {ImageBART: Bidirectional Context with Multinomial Diffusion for Autoregressive Image Synthesis},
author = {Patrick Esser and Robin Rombach and Andreas Blattmann and Björn Ommer},
url = {https://compvis.github.io/imagebart/
https://arxiv.org/abs/2108.08827},
year = {2021},
date = {2021-12-10},
urldate = {2021-12-10},
booktitle = {Neural Information Processing Systems (NeurIPS)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ufer, Nikolai; Simon, Max; Lang, Sabine; Ommer, Björn
Large-scale interactive retrieval in art collections using multi-style feature aggregation Journal Article
In: PLoS ONE, vol. 16, no. 11, 2021.
@article{ufer2021largescale,
title = {Large-scale interactive retrieval in art collections using multi-style feature aggregation},
author = {Nikolai Ufer and Max Simon and Sabine Lang and Björn Ommer},
url = {https://compvis.github.io/visual-search/},
doi = {10.1371/journal.pone.0259718},
year = {2021},
date = {2021-11-24},
urldate = {2021-11-24},
journal = {PLoS ONE},
volume = {16},
number = {11},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sardari, Faegheh; Ommer, Björn; Mirmehdi, Majid
Unsupervised View-Invariant Human Posture Representation Conference
British Machine Vision Conference (BMVC), 2021.
@conference{sardari2021posture,
title = {Unsupervised View-Invariant Human Posture Representation},
author = {Faegheh Sardari and Björn Ommer and Majid Mirmehdi},
url = {https://arxiv.org/abs/2109.08730},
year = {2021},
date = {2021-10-18},
urldate = {2021-10-18},
booktitle = {British Machine Vision Conference (BMVC)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Lang, Sabine; Ommer, Björn
Transforming Information Into Knowledge: How Computational Methods Reshape Art History Journal Article
In: Digital Humanities Quarterly (DHQ), vol. 15, no. 3, 2021.
@article{Lang2021,
title = {Transforming Information Into Knowledge: How Computational Methods Reshape Art History},
author = {Sabine Lang and Björn Ommer},
url = {http://digitalhumanities.org/dhq/vol/15/3/000560/000560.html},
year = {2021},
date = {2021-10-17},
urldate = {2021-10-17},
journal = {Digital Humanities Quarterly (DHQ)},
volume = {15},
number = {3},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Blattmann, Andreas; Milbich, Timo; Dorkenwald, Michael; Ommer, Björn
iPOKE: Poking a Still Image for Controlled Stochastic Video Synthesis Conference
Proceedings of the International Conference on Computer Vision (ICCV), 2021.
@conference{Blattmann2021,
  title     = {iPOKE: Poking a Still Image for Controlled Stochastic Video Synthesis},
  author    = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Björn Ommer},
  url       = {https://compvis.github.io/ipoke/
https://arxiv.org/abs/2107.02790},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2021-10-01},
  booktitle = {Proceedings of the International Conference on Computer Vision (ICCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Baur, Stefan Andreas; Emmerichs, David Josef; Moosmann, Frank; Pinggera, Peter; Ommer, Björn; Geiger, Andreas
SLIM: Self-Supervised LiDAR Scene Flow and Motion Segmentation Proceedings Article
In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pp. 13126-13136, 2021.
@inproceedings{Baur_2021_ICCV,
title = {SLIM: Self-Supervised LiDAR Scene Flow and Motion Segmentation},
author = {Stefan Andreas Baur and David Josef Emmerichs and Frank Moosmann and Peter Pinggera and Björn Ommer and Andreas Geiger},
url = {https://openaccess.thecvf.com/content/ICCV2021/html/Baur_SLIM_Self-Supervised_LiDAR_Scene_Flow_and_Motion_Segmentation_ICCV_2021_paper.html},
year = {2021},
date = {2021-10-01},
urldate = {2021-10-01},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {13126--13136},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sanakoyeu, Artsiom; Ma, Pingchuan; Tschernezki, Vadim; Ommer, Björn
Improving Deep Metric Learning by Divide and Conquer Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021.
@article{sanakoyeu2021divideconquer,
title = {Improving Deep Metric Learning by Divide and Conquer},
author = {Artsiom Sanakoyeu and Pingchuan Ma and Vadim Tschernezki and Björn Ommer},
url = {https://github.com/CompVis/metric-learning-divide-and-conquer-improved
https://ieeexplore.ieee.org/document/9540303},
doi = {10.1109/TPAMI.2021.3113270},
year = {2021},
date = {2021-09-16},
urldate = {2021-09-16},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Afifi, Mahmoud; Derpanis, Konstantinos G; Ommer, Björn; Brown, Michael S
Learning Multi-Scale Photo Exposure Correction Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{6992,
  title     = {Learning Multi-Scale Photo Exposure Correction},
  author    = {Mahmoud Afifi and Konstantinos G Derpanis and Björn Ommer and Michael S Brown},
  url       = {https://github.com/mahmoudnafifi/Exposure_Correction
https://arxiv.org/abs/2003.11596},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Esser, Patrick; Rombach, Robin; Ommer, Björn
Taming Transformers for High-Resolution Image Synthesis Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{7028,
  title     = {Taming Transformers for High-Resolution Image Synthesis},
  author    = {Patrick Esser and Robin Rombach and Björn Ommer},
  url       = {https://compvis.github.io/taming-transformers/
https://arxiv.org/abs/2012.09841},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Islam, Md Amirul; Kowal, Matthew; Esser, Patrick; Jia, Sen; Ommer, Björn; Derpanis, Konstantinos G; Bruce, Neil
Shape or Texture: Understanding Discriminative Features in CNNs Conference
International Conference on Learning Representations (ICLR), 2021.
@conference{7031,
title = {Shape or Texture: Understanding Discriminative Features in CNNs},
author = {Md Amirul Islam and Matthew Kowal and Patrick Esser and Sen Jia and Björn Ommer and Konstantinos G Derpanis and Neil Bruce},
url = {https://arxiv.org/abs/2101.11604},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {International Conference on Learning Representations (ICLR)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Kotovenko, Dmytro; Wright, Matthias; Heimbrecht, Arthur; Ommer, Björn
Rethinking Style Transfer: From Pixels to Parameterized Brushstrokes Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{7041,
title = {Rethinking Style Transfer: From Pixels to Parameterized Brushstrokes},
author = {Dmytro Kotovenko and Matthias Wright and Arthur Heimbrecht and Björn Ommer},
url = {https://compvis.github.io/brushstroke-parameterized-style-transfer/
https://arxiv.org/abs/2103.17185},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
abstract = {There have been many successful implementations of neural style transfer in recent years. In most of these works, the stylization process is confined to the pixel domain. However, we argue that this representation is unnatural because paintings usually consist of brushstrokes rather than pixels. We propose a method to stylize images by optimizing parameterized brushstrokes instead of pixels and further introduce a simple differentiable rendering mechanism. Our approach significantly improves visual quality and enables additional control over the stylization process such as controlling the flow of brushstrokes through user input. We provide qualitative and quantitative evaluations that show the efficacy of the proposed parameterized representation.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
neural style transfer in recent years. In most of these works, the stylization process is confined to the pixel domain. However, we argue that this representation is unnatural because paintings usually consist of brushstrokes rather than pixels. We propose a method to stylize images by optimizing parameterized brushstrokes instead of pixels and further introduce a simple differentiable rendering mechanism. Our approach significantly improves visual quality and enables additional control over the stylization process such as controlling the flow of brushstrokes through user input. We provide qualitative and quantitative evaluations that show the efficacy of the proposed parameterized representation.
Blattmann, Andreas; Milbich, Timo; Dorkenwald, Michael; Ommer, Björn
Behavior-Driven Synthesis of Human Dynamics Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{7044,
  title     = {Behavior-Driven Synthesis of Human Dynamics},
  author    = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Björn Ommer},
  url       = {https://compvis.github.io/behavior-driven-video-synthesis/
https://arxiv.org/abs/2103.04677},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Brattoli, Biagio; Büchler, Uta; Dorkenwald, Michael; Reiser, Philipp; Filli, Linard; Helmchen, Fritjof; Wahl, Anna-Sophia; Ommer, Björn
Unsupervised behaviour analysis and magnification (uBAM) using deep learning Journal Article
In: Nature Machine Intelligence, 2021.
@article{7045,
  title     = {Unsupervised behaviour analysis and magnification ({uBAM}) using deep learning},
  author    = {Biagio Brattoli and Uta Büchler and Michael Dorkenwald and Philipp Reiser and Linard Filli and Fritjof Helmchen and Anna-Sophia Wahl and Björn Ommer},
  url       = {https://utabuechler.github.io/behaviourAnalysis/
https://rdcu.be/ch6pL},
  doi       = {10.1038/s42256-021-00326-x},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  journal   = {Nature Machine Intelligence},
  abstract  = {Motor behaviour analysis is essential to biomedical research and clinical diagnostics as it provides a non-invasive strategy for identifying motor impairment and its change caused by interventions. State-of-the-art instrumented movement analysis is time- and cost-intensive, because it requires the placement of physical or virtual markers. As well as the effort required for marking the keypoints or annotations necessary for training or fine-tuning a detector, users need to know the interesting behaviour beforehand to provide meaningful keypoints. Here, we introduce unsupervised behaviour analysis and magnification (uBAM), an automatic deep learning algorithm for analysing behaviour by discovering and magnifying deviations. A central aspect is unsupervised learning of posture and behaviour representations to enable an objective comparison of movement. Besides discovering and quantifying deviations in behaviour, we also propose a generative model for visually magnifying subtle behaviour differences directly in a video without requiring a detour via keypoints or annotations. Essential for this magnification of deviations, even across different individuals, is a disentangling of appearance and behaviour. Evaluations on rodents and human patients with neurological diseases demonstrate the wide applicability of our approach. Moreover, combining optogenetic stimulation with our unsupervised behaviour analysis shows its suitability as a non-invasive diagnostic tool correlating function to brain plasticity.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Roth, Karsten; Milbich, Timo; Ommer, Björn; Cohen, Joseph Paul; Ghassemi, Marzyeh
S2SD: Simultaneous Similarity-based Self-Distillation for Deep Metric Learning Conference
Proceedings of International Conference on Machine Learning (ICML), 2021.
@conference{7051,
  title     = {{S2SD}: Simultaneous Similarity-based Self-Distillation for Deep Metric Learning},
  author    = {Karsten Roth and Timo Milbich and Björn Ommer and Joseph Paul Cohen and Marzyeh Ghassemi},
  url       = {https://github.com/MLforHealth/S2SD
https://arxiv.org/abs/2009.08348},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of International Conference on Machine Learning (ICML)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Dorkenwald, Michael; Milbich, Timo; Blattmann, Andreas; Rombach, Robin; Derpanis, Konstantinos G.; Ommer, Björn
Stochastic Image-to-Video Synthesis using cINNs Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{7053,
  title     = {Stochastic Image-to-Video Synthesis using {cINNs}},
  author    = {Michael Dorkenwald and Timo Milbich and Andreas Blattmann and Robin Rombach and Konstantinos G. Derpanis and Björn Ommer},
  url       = {https://compvis.github.io/image2video-synthesis-using-cINNs/
https://arxiv.org/abs/2105.04551},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Jahn, Manuel; Rombach, Robin; Ommer, Björn
High-Resolution Complex Scene Synthesis with Transformers Conference
CVPR 2021, AI for Content Creation Workshop, 2021.
@conference{7054,
  title     = {High-Resolution Complex Scene Synthesis with Transformers},
  author    = {Manuel Jahn and Robin Rombach and Björn Ommer},
  url       = {https://compvis.github.io/taming-transformers/
https://arxiv.org/abs/2105.06458},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {CVPR 2021, AI for Content Creation Workshop},
  abstract  = {The use of coarse-grained layouts for controllable synthesis of complex scene images via deep generative models has recently gained popularity. However, results of current approaches still fall short of their promise of high-resolution synthesis. We hypothesize that this is mostly due to the highly engineered nature of these approaches which often rely on auxiliary losses and intermediate steps such as mask generators. In this note, we present an orthogonal approach to this task, where the generative model is based on pure likelihood training without additional objectives. To do so, we first optimize a powerful compression model with adversarial training which learns to reconstruct its inputs via a discrete latent bottleneck and thereby effectively strips the latent representation of high-frequency details such as texture. Subsequently, we train an autoregressive transformer model to learn the distribution of the discrete image representations conditioned on a tokenized version of the layouts. Our experiments show that the resulting system is able to synthesize high-quality images consistent with the given layouts. In particular, we improve the state-of-the-art FID score on COCO-Stuff and on Visual Genome by up to 19% and 53% and demonstrate the synthesis of images up to 512 x 512 px on COCO and Open Images.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Blattmann, Andreas; Milbich, Timo; Dorkenwald, Michael; Ommer, Björn
Understanding Object Dynamics for Interactive Image-to-Video Synthesis Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
@conference{7063,
  title     = {Understanding Object Dynamics for Interactive Image-to-Video Synthesis},
  author    = {Andreas Blattmann and Timo Milbich and Michael Dorkenwald and Björn Ommer},
  url       = {https://compvis.github.io/interactive-image2video-synthesis/
https://arxiv.org/abs/2106.11303v1},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  abstract  = {What would be the effect of locally poking a static scene? We present an approach that learns naturally-looking global articulations caused by a local manipulation at a pixel level. Training requires only videos of moving objects but no information of the underlying manipulation of the physical scene. Our generative model learns to infer natural object dynamics as a response to user interaction and learns about the interrelations between different object body regions. Given a static image of an object and a local poking of a pixel, the approach then predicts how the object would deform over time. In contrast to existing work on video prediction, we do not synthesize arbitrary realistic videos but enable local interactive control of the deformation. Our model is not restricted to particular object categories and can transfer dynamics onto novel unseen object instances. Extensive experiments on diverse objects demonstrate the effectiveness of our approach compared to common video prediction frameworks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Rombach, Robin; Esser, Patrick; Ommer, Björn
Geometry-Free View Synthesis: Transformers and no 3D Priors Conference
Proceedings of the Intl. Conf. on Computer Vision (ICCV), 2021.
@conference{7067,
  title     = {Geometry-Free View Synthesis: Transformers and no {3D} Priors},
  author    = {Robin Rombach and Patrick Esser and Björn Ommer},
  url       = {https://compvis.github.io/geometry-free-view-synthesis/
https://arxiv.org/abs/2104.07652},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  booktitle = {Proceedings of the Intl. Conf. on Computer Vision (ICCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2020
Milbich, Timo; Ghori, Omair; Ommer, Björn
Unsupervised Representation Learning by Discovering Reliable Image Relations Journal Article
In: Pattern Recognition, vol. 102, 2020.
@article{6339,
  title     = {Unsupervised Representation Learning by Discovering Reliable Image Relations},
  author    = {Timo Milbich and Omair Ghori and Björn Ommer},
  url       = {http://arxiv.org/abs/1911.07808},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {Pattern Recognition},
  volume    = {102},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Milbich, Timo; Roth, Karsten; Ommer, Björn
PADS: Policy-Adapted Sampling for Visual Similarity Learning Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), vol. 1, 2020.
@conference{6386,
  title     = {{PADS}: Policy-Adapted Sampling for Visual Similarity Learning},
  author    = {Timo Milbich and Karsten Roth and Björn Ommer},
  url       = {https://github.com/Confusezius/CVPR2020_PADS
https://arxiv.org/abs/2003.11113},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  volume    = {1},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Roth, Karsten; Milbich, Timo; Sinha, Samarth; Gupta, Prateek; Ommer, Björn; Cohen, Joseph Paul
Revisiting Training Strategies and Generalization Performance in Deep Metric Learning Conference
International Conference on Machine Learning (ICML), 2020.
@conference{6390,
  title     = {Revisiting Training Strategies and Generalization Performance in Deep Metric Learning},
  author    = {Karsten Roth and Timo Milbich and Samarth Sinha and Prateek Gupta and Björn Ommer and Joseph Paul Cohen},
  url       = {https://arxiv.org/abs/2002.08473},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {International Conference on Machine Learning (ICML)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Esser, Patrick; Rombach, Robin; Ommer, Björn
A Disentangling Invertible Interpretation Network for Explaining Latent Representations Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
@conference{6932,
  title     = {A Disentangling Invertible Interpretation Network for Explaining Latent Representations},
  author    = {Patrick Esser and Robin Rombach and Björn Ommer},
  url       = {https://compvis.github.io/iin/
https://arxiv.org/abs/2004.13166},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  abstract  = {Neural networks have greatly boosted performance in computer vision by learning powerful representations of input data. The drawback of end-to-end training for maximal overall performance are black-box models whose hidden representations are lacking interpretability: Since distributed coding is optimal for latent layers to improve their robustness, attributing meaning to parts of a hidden feature vector or to individual neurons is hindered. We formulate interpretation as a translation of hidden representations onto semantic concepts that are comprehensible to the user. The mapping between both domains has to be bijective so that semantic modifications in the target domain correctly alter the original representation. The proposed invertible interpretation network can be transparently applied on top of existing architectures with no need to modify or retrain them. Consequently, we translate an original representation to an equivalent yet interpretable one and backwards without affecting the expressiveness and performance of the original. The invertible interpretation network disentangles the hidden representation into separate, semantically meaningful concepts. Moreover, we present an efficient approach to define semantic concepts by only sketching two images and also an unsupervised strategy. Experimental evaluation demonstrates the wide applicability to interpretation of existing classification and image generation networks as well as to semantically guided image manipulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Milbich, Timo; Roth, Karsten; Bharadhwaj, Homanga; Sinha, Samarth; Bengio, Yoshua; Ommer, Björn; Cohen, Joseph Paul
DiVA: Diverse Visual Feature Aggregation for Deep Metric Learning Conference
IEEE European Conference on Computer Vision (ECCV), 2020.
@conference{6934,
  title     = {{DiVA}: Diverse Visual Feature Aggregation for Deep Metric Learning},
  author    = {Timo Milbich and Karsten Roth and Homanga Bharadhwaj and Samarth Sinha and Yoshua Bengio and Björn Ommer and Joseph Paul Cohen},
  url       = {https://arxiv.org/abs/2004.13458},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {IEEE European Conference on Computer Vision (ECCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Rombach, Robin; Esser, Patrick; Ommer, Björn
Making Sense of CNNs: Interpreting Deep Representations & Their Invariances with INNs Conference
IEEE European Conference on Computer Vision (ECCV), 2020.
@conference{6997,
  title     = {Making Sense of {CNNs}: Interpreting Deep Representations \& Their Invariances with {INNs}},
  author    = {Robin Rombach and Patrick Esser and Björn Ommer},
  url       = {https://compvis.github.io/invariances/
https://arxiv.org/pdf/2008.01777.pdf},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {IEEE European Conference on Computer Vision (ECCV)},
  abstract  = {To tackle increasingly complex tasks, it has become an essential ability of neural networks to learn abstract representations. These task-specific representations and, particularly, the invariances they capture turn neural networks into black box models that lack interpretability. To open such a black box, it is, therefore, crucial to uncover the different semantic concepts a model has learned as well as those that it has learned to be invariant to. We present an approach based on INNs that (i) recovers the task-specific, learned invariances by disentangling the remaining factor of variation in the data and that (ii) invertibly transforms these recovered invariances combined with the model representation into an equally expressive one with accessible semantic concepts. As a consequence, neural network representations become understandable by providing the means to (i) expose their semantic meaning, (ii) semantically modify a representation, and (iii) visualize individual learned semantic concepts and invariances. Our invertible approach significantly extends the abilities to understand black box models by enabling post-hoc interpretations of state-of-the-art networks without compromising their performance.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Braun, Sandro; Esser, Patrick; Ommer, Björn
Unsupervised Part Discovery by Unsupervised Disentanglement Conference
Proceedings of the German Conference on Pattern Recognition (GCPR) (Oral), Tübingen, 2020.
@conference{7004,
  title     = {Unsupervised Part Discovery by Unsupervised Disentanglement},
  author    = {Sandro Braun and Patrick Esser and Björn Ommer},
  url       = {https://compvis.github.io/unsupervised-part-segmentation/
https://arxiv.org/abs/2009.04264},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the German Conference on Pattern Recognition (GCPR) (Oral)},
  address   = {Tübingen},
  abstract  = {We address the problem of discovering part segmentations of articulated objects without supervision. In contrast to keypoints, part segmentations provide information about part localizations on the level of individual pixels. Capturing both locations and semantics, they are an attractive target for supervised learning approaches. However, large annotation costs limit the scalability of supervised algorithms to other object categories than humans. Unsupervised approaches potentially allow to use much more data at a lower cost. Most existing unsupervised approaches focus on learning abstract representations to be refined with supervision into the final representation. Our approach leverages a generative model consisting of two disentangled representations for an object's shape and appearance and a latent variable for the part segmentation. From a single image, the trained model infers a semantic part segmentation map. In experiments, we compare our approach to previous state-of-the-art approaches and observe significant gains in segmentation accuracy and shape consistency. Our work demonstrates the feasibility to discover semantic part segmentations without supervision.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Rombach, Robin; Esser, Patrick; Ommer, Björn
Network-to-Network Translation with Conditional Invertible Neural Networks Conference
Neural Information Processing Systems (NeurIPS) (Oral), 2020.
@conference{7011,
  title     = {Network-to-Network Translation with Conditional Invertible Neural Networks},
  author    = {Robin Rombach and Patrick Esser and Björn Ommer},
  url       = {https://compvis.github.io/net2net/
https://arxiv.org/abs/2005.13580},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Neural Information Processing Systems (NeurIPS) (Oral)},
  abstract  = {Combining stimuli from diverse modalities into a coherent perception is a striking feat of intelligence of evolved brains. This work seeks its analogy in deep learning models and aims to establish relations between existing networks by faithfully combining the representations of these different domains. Therefore, we seek a model that can relate between different existing representations by learning a conditionally invertible mapping between them. The network demonstrates this capability by (i) providing generic transfer between diverse domains, (ii) enabling controlled content synthesis by allowing modification in other domains, and (iii) facilitating diagnosis of existing representations by translating them into an easily accessible domain. Our domain transfer network can translate between fixed representations without having to learn or finetune them. This allows users to utilize various existing domain-specific expert models from the literature that had been trained with extensive computational resources. Experiments on diverse conditional image synthesis tasks, competitive image modification results and experiments on image-to-image and text-to-image generation demonstrate the generic applicability of our approach. In particular, we translate between BERT and BigGAN, state-of-the-art text and image models to provide text-to-image generation, which neither of both experts can perform on their own.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Rombach, Robin; Esser, Patrick; Ommer, Björn
Network Fusion for Content Creation with Conditional INNs Conference
CVPRW 2020 (AI for Content Creation), 2020.
@conference{7012,
  title     = {Network Fusion for Content Creation with Conditional {INNs}},
  author    = {Robin Rombach and Patrick Esser and Björn Ommer},
  url       = {https://compvis.github.io/network-fusion/
https://arxiv.org/abs/2005.13580},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {CVPRW 2020 (AI for Content Creation)},
  abstract  = {Artificial Intelligence for Content Creation has the potential to reduce the amount of manual content creation work significantly. While automation of laborious work is welcome, it is only useful if it allows users to control aspects of the creative process when desired. Furthermore, widespread adoption of semi-automatic content creation depends on low barriers regarding the expertise, computational budget and time required to obtain results and experiment with new techniques. With state-of-the-art approaches relying on task-specific models, multi-GPU setups and weeks of training time, we must find ways to reuse and recombine them to meet these requirements. Instead of designing and training methods for controllable content creation from scratch, we thus present a method to repurpose powerful, existing models for new tasks, even though they have never been designed for them. We formulate this problem as a translation between expert models, which includes common content creation scenarios, such as text-to-image and image-to-image translation, as a special case. As this translation is ambiguous, we learn a generative model of hidden representations of one expert conditioned on hidden representations of the other expert. Working on the level of hidden representations makes optimal use of the computational effort that went into the training of the expert model to produce these efficient, low-dimensional representations. Experiments demonstrate that our approach can translate from BERT, a state-of-the-art expert for text, to BigGAN, a state-of-the-art expert for images, to enable text-to-image generation, which neither of the experts can perform on its own. Additional experiments show the wide applicability of our approach across different conditional image synthesis tasks and improvements over existing methods for image modifications.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Esser, Patrick; Rombach, Robin; Ommer, Björn
A Note on Data Biases in Generative Models Conference
NeurIPS 2020 Workshop on Machine Learning for Creativity and Design, 2020.
@conference{7025,
  title     = {A Note on Data Biases in Generative Models},
  author    = {Patrick Esser and Robin Rombach and Björn Ommer},
  url       = {https://neurips2020creativity.github.io/
https://arxiv.org/abs/2012.02516},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {NeurIPS 2020 Workshop on Machine Learning for Creativity and Design},
  abstract  = {It is tempting to think that machines are less prone to unfairness and prejudice. However, machine learning approaches compute their outputs based on data. While biases can enter at any stage of the development pipeline, models are particularly receptive to mirror biases of the datasets they are trained on and therefore do not necessarily reflect truths about the world but, primarily, truths about the data. To raise awareness about the relationship between modern algorithms and the data that shape them, we use a conditional invertible neural network to disentangle the dataset-specific information from the information which is shared across different datasets. In this way, we can project the same image onto different datasets, thereby revealing their inherent biases. We use this methodology to (i) investigate the impact of dataset quality on the performance of generative models, (ii) show how societal biases of datasets are replicated by generative models, and (iii) present creative applications through unpaired transfer between diverse datasets such as photographs, oil portraits, and animes.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Dencker, Tobias; Klinkisch, Pablo; Maul, Stefan M; Ommer, Björn
Deep learning of cuneiform sign detection with weak supervision using transliteration alignment Journal Article
In: PLoS ONE, vol. 15, 2020.
@article{7029,
  title     = {Deep learning of cuneiform sign detection with weak supervision using transliteration alignment},
  author    = {Tobias Dencker and Pablo Klinkisch and Stefan M Maul and Björn Ommer},
  url       = {https://hci.iwr.uni-heidelberg.de/compvis/projects/cuneiform
https://ommer-lab.com/wp-content/uploads/2021/10/Deep-Learning-of-Cuneiform-Sign-Detection-with-Weak-Supervivion-Using-Transliteration-Alignment.pdf},
  doi       = {10.1371/journal.pone.0243039},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  journal   = {PLoS ONE},
  volume    = {15},
  pages     = {1--21},
  abstract  = {The cuneiform script provides a glimpse into our ancient history. However, reading age-old clay tablets is time-consuming and requires years of training. To simplify this process, we propose a deep-learning based sign detector that locates and classifies cuneiform signs in images of clay tablets. Deep learning requires large amounts of training data in the form of bounding boxes around cuneiform signs, which are not readily available and costly to obtain in the case of cuneiform script. To tackle this problem, we make use of existing transliterations, a sign-by-sign representation of the tablet content in Latin script. Since these do not provide sign localization, we propose a weakly supervised approach: We align tablet images with their corresponding transliterations to localize the transliterated signs in the tablet image, before using these localized signs in place of annotations to re-train the sign detector. A better sign detector in turn boosts the quality of the alignments. We combine these steps in an iterative process that enables training a cuneiform sign detector from transliterations only. While our method works weakly supervised, a small number of annotations further boost the performance of the cuneiform sign detector which we evaluate on a large collection of clay tablets from the Neo-Assyrian period. To enable experts to directly apply the sign detector in their study of cuneiform texts, we additionally provide a web application for the analysis of clay tablets with a trained cuneiform sign detector.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lang, Sabine; Ommer, Björn
Das Objekt jenseits der Digitalisierung Conference
Das digitale Objekt, vol. 7, 2020, ISBN: 978-3-948808-00-6.
@conference{7038,
  title     = {Das Objekt jenseits der Digitalisierung},
  author    = {Sabine Lang and Björn Ommer},
  url       = {https://www.youtube.com/watch?v=QqupZYCTl98
https://www.deutsches-museum.de/assets/Verlag/Download/Studies/studies-7-download.pdf},
  isbn      = {978-3-948808-00-6},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Das digitale Objekt},
  volume    = {7},
  abstract  = {Der technische Fortschritt der letzten Jahrzehnte hat disruptive Veränderungen für Gesellschaft, Wirtschaft und Wissenschaft gebracht: Die Digitalisierung ist ein Resultat dessen und beeinflusst, wie wir auf Daten zugreifen, diese verarbeiten, analysieren und Ergebnisse verbreiten. Obwohl dadurch bereits ein Wandel eingeleitet worden ist, kann das Digitalisieren von Textdokumenten oder Bildern nicht das endgültige Ziel sein. Der Fokus aktueller Bestrebungen sollte vielmehr auf der Möglichkeit der Weiterverarbeitung von Digitalisaten liegen – dies schließt eine intelligente Informationsverarbeitung ein. Der Wert der Digitalisierung besteht nicht in der bloßen Anhäufung digitaler Sammlungen, sondern in der Tatsache, dass sie weitaus mehr ermöglicht als das Analoge und dafür die notwendigen Grundvoraussetzungen schafft.
Die Problematik besteht nun darin, dass die meisten Verarbeitungs- und Analysemethoden für digitale Daten noch analog oder diesen nachempfunden sind: So werden digitale Sammlungen und darin enthaltene Bilder häufig noch mit den eigenen Augen, in traditionell komparativer Weise betrachtet und evaluiert. Dass dies aufgrund der Fülle an Daten nicht effizient ist, muss an dieser Stelle nicht betont werden. Obwohl das analoge und das digitale Bild den gleichen Inhalt zeigen können, haben beide doch ganz unterschiedliche Substrate. Ein Unterschied besteht zum Beispiel darin, dass digitale Bilder im Gegensatz zu analogen einfach manipuliert und dupliziert werden können. Das Digitale ist nicht das Analoge in neuer Form, und so bedarf es genuin digitaler Methoden für die Verarbeitung digitaler Daten. Durch die Entwicklung computergestützter Verfahren entstehen neue Möglichkeiten, Inhalte zu erschließen: Dazu gehören Ansätze zur Objektsuche oder das Gruppieren und Sortieren der Daten entsprechend benutzerdefinierter Dimensionen; dies schließt übergeordnete Kategorien wie Stil oder Genre, aber auch nuancierte Begriffe wie Alter oder Gewichtung der Bildkomposition ein. Doch das Digitale und entsprechende Verfahren können noch weitaus mehr leisten: Generative Verfahren, wie die Bildsynthese und Stilisierung eines Bildes, ermöglichen eine Blickänderung auf das Artefakt und schließlich die Modifizierung des Objekts selbst. Wie hätte ein Künstler eine uns sichtbare Szene gemalt und dargestellt? Und wie sieht ein Mensch in der Pose eines anderen aus? Dies sind Fragen, die durch die Anwendung computergestützter Methoden beantwortet werden können. Für das Museum haben diese Ansätze eine besondere Relevanz, da sie neue Arten des Betrachtens und Vermittelns von Kunstwerken oder zum Beispiel die Rekonstruktion verlorener Artefakte erlauben.
In Zusammenarbeit von Mensch und Maschine entstehen so neue effektive Verfahren, die Inhalte erschließen, Verbindungen etablieren und neues Wissen generieren.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Die Problematik besteht nun darin, dass die meisten Verarbeitungs- und Analysemethoden für digitale Daten noch analog oder diesen nachempfunden sind: So werden digitale Sammlungen und darin enthaltene Bilder häufig noch mit den eigenen Augen, in traditionell komparativer Weise betrachtet und evaluiert. Dass dies aufgrund der Fülle an Daten nicht effizient ist, muss an dieser Stelle nicht betont werden. Obwohl das analoge und das digitale Bild den gleichen Inhalt zeigen können, haben beide doch ganz unterschiedliche Substrate. Ein Unterschied besteht zum Beispiel darin, dass digitale Bilder im Gegensatz zu analogen einfach manipuliert und dupliziert werden können. Das Digitale ist nicht das Analoge in neuer Form, und so bedarf es genuin digitaler Methoden für die Verarbeitung digitaler Daten. Durch die Entwicklung computergestützter Verfahren entstehen neue Möglichkeiten, Inhalte zu erschließen: Dazu gehören Ansätze zur Objektsuche oder das Gruppieren und Sortieren der Daten entsprechend benutzerdefinierter Dimensionen; dies schließt übergeordnete Kategorien wie Stil oder Genre, aber auch nuancierte Begriffe wie Alter oder Gewichtung der Bildkomposition ein. Doch das Digitale und entsprechende Verfahren können noch weitaus mehr leisten: Generative Verfahren, wie die Bildsynthese und Stilisierung eines Bildes, ermöglichen eine Blickänderung auf das Artefakt und schließlich die Modifizierung des Objekts selbst. Wie hätte ein Künstler eine uns sichtbare Szene gemalt und dargestellt? Und wie sieht ein Mensch in der Pose eines anderen aus? Dies sind Fragen, die durch die Anwendung computergestützter Methoden beantwortet werden können. Für das Museum haben diese Ansätze eine besondere Relevanz, da sie neue Arten des Betrachtens und Vermittelns von Kunstwerken oder zum Beispiel die Rekonstruktion verlorener Artefakte erlauben.
In Zusammenarbeit von Mensch und Maschine entstehen so neue effektive Verfahren, die Inhalte erschließen, Verbindungen etablieren und neues Wissen generieren.
Dorkenwald, Michael; Büchler, Uta; Ommer, Björn
Unsupervised Magnification of Posture Deviations Across Subjects Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
@conference{7042,
  title     = {Unsupervised Magnification of Posture Deviations Across Subjects},
  author    = {Michael Dorkenwald and Uta Büchler and Björn Ommer},
  url       = {https://compvis.github.io/magnify-posture-deviations/
https://openaccess.thecvf.com/content_CVPR_2020/papers/Dorkenwald_Unsupervised_Magnification_of_Posture_Deviations_Across_Subjects_CVPR_2020_paper.pdf},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Ufer, Nikolai; Lang, Sabine; Ommer, Björn
Object Retrieval and Localization in Large Art Collections Using Deep Multi-style Feature Fusion and Iterative Voting Conference
IEEE European Conference on Computer Vision (ECCV), VISART Workshop, 2020.
@conference{7064,
  title     = {Object Retrieval and Localization in Large Art Collections Using Deep Multi-style Feature Fusion and Iterative Voting},
  author    = {Nikolai Ufer and Sabine Lang and Björn Ommer},
  url       = {https://arxiv.org/abs/2107.06935},
  doi       = {10.1007/978-3-030-66096-3_12},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  booktitle = {IEEE European Conference on Computer Vision (ECCV), VISART Workshop},
  abstract  = {The search for specific objects or motifs is essential to art history as both assist in decoding the meaning of artworks. Digitization has produced large art collections, but manual methods prove to be insufficient to analyze them. In the following, we introduce an algorithm that allows users to search for image regions containing specific motifs or objects and find similar regions in an extensive dataset, helping art historians to analyze large digitized art collections. Computer vision has presented efficient methods for visual instance retrieval across photographs. However, applied to art collections, they reveal severe deficiencies because of diverse motifs and massive domain shifts induced by differences in techniques, materials, and styles. In this paper, we present a multi-style feature fusion approach that successfully reduces the domain gap and improves retrieval results without labelled data or curated image collections. Our region-based voting with GPU-accelerated approximate nearest-neighbour search allows us to find and localize even small motifs within an extensive dataset in a few seconds. We obtain state-of-the-art results on the Brueghel dataset and demonstrate its generalization to inhomogeneous collections with a large number of distractors.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2019
Sanakoyeu, A.; Tschernezki, V.; Büchler, Uta; Ommer, Björn
Divide and Conquer the Embedding Space for Metric Learning Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
@conference{6299,
  title     = {Divide and Conquer the Embedding Space for Metric Learning},
  author    = {A. Sanakoyeu and V. Tschernezki and Uta Büchler and Björn Ommer},
  url       = {https://github.com/CompVis/metric-learning-divide-and-conquer
https://arxiv.org/abs/1906.05990},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Kotovenko, Dmytro; Sanakoyeu, A.; Lang, Sabine; Ma, P.; Ommer, Björn
Using a Transformation Content Block For Image Style Transfer Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
@conference{6300,
title = {Using a Transformation Content Block For Image Style Transfer},
author = {Dmytro Kotovenko and A. Sanakoyeu and Sabine Lang and P. Ma and Björn Ommer},
url = {https://compvis.github.io/content-targeted-style-transfer/
https://arxiv.org/abs/2003.08407},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Lorenz, Dominik; Bereska, Leonard; Milbich, Timo; Ommer, Björn
Unsupervised Part-Based Disentangling of Object Shape and Appearance Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (Oral + Best paper finalist: top 45 / 5160 submissions), 2019.
@conference{6301,
title = {Unsupervised Part-Based Disentangling of Object Shape and Appearance},
author = {Dominik Lorenz and Leonard Bereska and Timo Milbich and Björn Ommer},
url = {https://compvis.github.io/unsupervised-disentangling/
https://arxiv.org/abs/1903.06946},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (Oral + Best paper finalist: top 45 / 5160 submissions)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Brattoli, Biagio; Roth, Karsten; Ommer, Björn
MIC: Mining Interclass Characteristics for Improved Metric Learning Conference
Proceedings of the Intl. Conf. on Computer Vision (ICCV), 2019.
@conference{6321,
title = {MIC: Mining Interclass Characteristics for Improved Metric Learning},
author = {Biagio Brattoli and Karsten Roth and Björn Ommer},
url = {https://github.com/Confusezius/ICCV2019_MIC
https://arxiv.org/abs/1909.11574},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the Intl. Conf. on Computer Vision (ICCV)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Kotovenko, Dmytro; Sanakoyeu, Artsiom; Lang, Sabine; Ommer, Björn
Content and Style Disentanglement for Artistic Style Transfer Conference
Proceedings of the Intl. Conf. on Computer Vision (ICCV), 2019.
@conference{6322,
title = {Content and Style Disentanglement for Artistic Style Transfer},
author = {Dmytro Kotovenko and Artsiom Sanakoyeu and Sabine Lang and Björn Ommer},
url = {https://compvis.github.io/content-style-disentangled-ST/
https://compvis.github.io/content-style-disentangled-ST/paper.pdf},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the Intl. Conf. on Computer Vision (ICCV)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Esser, Patrick; Haux, Johannes; Ommer, Björn
Unsupervised Robust Disentangling of Latent Characteristics for Image Synthesis Conference
Proceedings of the Intl. Conf. on Computer Vision (ICCV), 2019.
@conference{6323,
title = {Unsupervised Robust Disentangling of Latent Characteristics for Image Synthesis},
author = {Patrick Esser and Johannes Haux and Björn Ommer},
url = {https://compvis.github.io/robust-disentangling/
https://arxiv.org/abs/1910.10223},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {Proceedings of the Intl. Conf. on Computer Vision (ICCV)},
abstract = {Deep generative models come with the promise to learn an explainable representation for visual objects that allows image sampling, synthesis, and selective modification. The main challenge is to learn to properly model the independent latent characteristics of an object, especially its appearance and pose. We present a novel approach that learns disentangled representations of these characteristics and explains them individually. Training requires only pairs of images depicting the same object appearance, but no pose annotations. We propose an additional classifier that estimates the minimal amount of regularization required to enforce disentanglement. Thus both representations together can completely explain an image while being independent of each other. Previous methods based on adversarial approaches fail to enforce this independence, while methods based on variational approaches lead to uninformative representations. In experiments on diverse object categories, the approach successfully recombines pose and appearance to reconstruct and retarget novel synthesized images. We achieve significant improvements over state-of-the-art methods which utilize the same level of supervision, and reach performances comparable to those of pose-supervised approaches. However, we can handle the vast body of articulated object classes for which no pose models/annotations are available.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ufer, Nikolai; Lui, Kam To; Schwarz, Katja; Warkentin, Paul; Ommer, Björn
Weakly Supervised Learning of Dense Semantic Correspondences and Segmentation Conference
German Conference on Pattern Recognition (GCPR), 2019.
@conference{6324,
  title     = {Weakly Supervised Learning of Dense Semantic Correspondences and Segmentation},
  author    = {Nikolai Ufer and Kam To Lui and Katja Schwarz and Paul Warkentin and Björn Ommer},
  url       = {https://www.researchgate.net/publication/336816354_Weakly_Supervised_Learning_of_Dense_Semantic_Correspondences_and_Segmentation},
  doi       = {10.1007/978-3-030-33676-9_32},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  booktitle = {German Conference on Pattern Recognition (GCPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2018
Sanakoyeu, A.; Bautista, Miguel; Ommer, Björn
Deep Unsupervised Learning of Visual Similarities Journal Article
In: Pattern Recognition, vol. 78, 2018.
@article{6229,
  title     = {Deep Unsupervised Learning of Visual Similarities},
  author    = {A. Sanakoyeu and Miguel Bautista and Björn Ommer},
  url       = {https://arxiv.org/abs/1802.08562},
  doi       = {10.1016/j.patcog.2018.01.036},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  journal   = {Pattern Recognition},
  volume    = {78},
  chapter   = {331},
  abstract  = {Exemplar learning of visual similarities in an unsupervised manner is a problem of paramount importance to Computer Vision. In this context, however, the recent breakthrough in deep learning could not yet unfold its full potential. With only a single positive sample, a great imbalance between one positive and many negatives, and unreliable relationships between most samples, training of Convolutional Neural networks is impaired. In this paper we use weak estimates of local similarities and propose a single optimization problem to extract batches of samples with mutually consistent relations. Conflicting relations are distributed over different batches and similar samples are grouped into compact groups. Learning visual similarities is then framed as a sequence of categorization tasks. The CNN then consolidates transitivity relations within and between groups and learns a single representation for all samples without the need for labels. The proposed unsupervised approach has shown competitive performance on detailed posture analysis and object classification.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lang, Sabine; Ommer, Björn
Attesting Similarity: Supporting the Organization and Study of Art Image Collections with Computer Vision Journal Article
In: Digital Scholarship in the Humanities, Oxford University Press, vol. 33, no. 4, pp. 845-856, 2018.
@article{6247,
  title     = {Attesting Similarity: Supporting the Organization and Study of Art Image Collections with Computer Vision},
  author    = {Sabine Lang and Björn Ommer},
  url       = {https://doi.org/10.1093/llc/fqy006
https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
https://academic.oup.com/dsh/article/33/4/845/4964861?guestAccessKey=3eeea652-0c2b-4272-9e42-5ef4c5af6cc4},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  journal   = {Digital Scholarship in the Humanities, Oxford University Press},
  volume    = {33},
  number    = {4},
  pages     = {845-856},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Esser, Patrick; Sutter, Ekaterina; Ommer, Björn
A Variational U-Net for Conditional Appearance and Shape Generation Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (short Oral), 2018.
@conference{6249,
  title     = {A Variational U-Net for Conditional Appearance and Shape Generation},
  author    = {Patrick Esser and Ekaterina Sutter and Björn Ommer},
  url       = {https://compvis.github.io/vunet/
https://arxiv.org/abs/1804.04694},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-02},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (short Oral)},
  abstract  = {Deep generative models have demonstrated great performance in image synthesis. However, results deteriorate in case of spatial deformations, since they generate images of objects directly, rather than modeling the intricate interplay of their inherent shape and appearance. We present a conditional U-Net for shape-guided image generation, conditioned on the output of a variational autoencoder for appearance. The approach is trained end-to-end on images, without requiring samples of the same object with varying pose or appearance. Experiments show that the model enables conditional image generation and transfer. Therefore, either shape or appearance can be retained from a query image, while freely altering the other. Moreover, appearance can be sampled due to its stochastic latent representation, while preserving shape. In quantitative and qualitative experiments on COCO, DeepFashion, shoes, Market-1501 and handbags, the approach demonstrates significant improvements over the state-of-the-art.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Ghori, Omar; Mackowiak, Radek; Bautista, Miguel; Beuter, Niklas; Drumond, Lucas; Diego, Ferran; Ommer, Björn
Learning to Forecast Pedestrian Intention from Pose Dynamics Conference
Intelligent Vehicles, IEEE, 2018.
@conference{6250,
  title     = {Learning to Forecast Pedestrian Intention from Pose Dynamics},
  author    = {Omar Ghori and Radek Mackowiak and Miguel Bautista and Niklas Beuter and Lucas Drumond and Ferran Diego and Björn Ommer},
  url       = {https://www.researchgate.net/publication/328451374_Learning_to_Forecast_Pedestrian_Intention_from_Pose_Dynamics},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Intelligent Vehicles, IEEE},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Büchler, Uta; Brattoli, Biagio; Ommer, Björn
Improving Spatiotemporal Self-Supervision by Deep Reinforcement Learning Conference
Proceedings of the European Conference on Computer Vision (ECCV), (UB and BB contributed equally), Munich, Germany, 2018.
@conference{buechler:ECCV:2018,
  title     = {Improving Spatiotemporal Self-Supervision by Deep Reinforcement Learning},
  author    = {Uta Büchler and Biagio Brattoli and Björn Ommer},
  url       = {https://arxiv.org/abs/1807.11293
https://hci.iwr.uni-heidelberg.de/sites/default/files/publications/files/1855931744/buechler_eccv18_poster.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
  publisher = {(UB and BB contributed equally)},
  address   = {Munich, Germany},
  abstract  = {Self-supervised learning of convolutional neural networks can harness large amounts of cheap unlabeled data to train powerful feature representations. As surrogate task, we jointly address ordering of visual data in the spatial and temporal domain. The permutations of training samples, which are at the core of self-supervision by ordering, have so far been sampled randomly from a fixed preselected set. Based on deep reinforcement learning we propose a sampling policy that adapts to the state of the network, which is being trained. Therefore, new permutations are sampled according to their expected utility for updating the convolutional feature representation. Experimental evaluation on unsupervised and transfer learning tasks demonstrates competitive performance on standard benchmarks for image and video classification and nearest neighbor retrieval.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Sanakoyeu, A.; Kotovenko, Dmytro; Lang, Sabine; Ommer, Björn
A Style-Aware Content Loss for Real-time HD Style Transfer Conference
Proceedings of the European Conference on Computer Vision (ECCV) (Oral), 2018.
@conference{style_aware_content_loss_eccv18,
  title     = {A Style-Aware Content Loss for Real-time HD Style Transfer},
  author    = {A. Sanakoyeu and Dmytro Kotovenko and Sabine Lang and Björn Ommer},
  url       = {https://compvis.github.io/adaptive-style-transfer/
https://arxiv.org/abs/1807.10201},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) (Oral)},
  abstract  = {Recently, style transfer has received a lot of attention. While much of this research has aimed at speeding up processing, the approaches are still lacking from a principled, art historical standpoint: a style is more than just a single image or an artist, but previous work is limited to only a single instance of a style or shows no benefit from more images. Moreover, previous work has relied on a direct comparison of art in the domain of RGB images or on CNNs pre-trained on ImageNet, which requires millions of labeled object bounding boxes and can introduce an extra bias, since it has been assembled without artistic consideration. To circumvent these issues, we propose a style-aware content loss, which is trained jointly with a deep encoder-decoder network for real-time, high-resolution stylization of images and videos. We propose a quantitative measure for evaluating the quality of a stylized image and also have art historians rank patches from our approach against those from previous work. These and our qualitative results ranging from small image patches to megapixel stylistic images and videos show that our approach better captures the subtle nature in which a style affects content.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Sayed, N.; Brattoli, Biagio; Ommer, Björn
Cross and Learn: Cross-Modal Self-Supervision Conference
German Conference on Pattern Recognition (GCPR) (Oral), Stuttgart, Germany, 2018.
@conference{sayed:GCPR:2018,
  title     = {Cross and Learn: Cross-Modal Self-Supervision},
  author    = {N. Sayed and Biagio Brattoli and Björn Ommer},
  url       = {https://arxiv.org/abs/1811.03879v1
https://bbrattoli.github.io/publications/images/sayed_crossandlearn.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {German Conference on Pattern Recognition (GCPR) (Oral)},
  address   = {Stuttgart, Germany},
  abstract  = {In this paper we present a self-supervised method to learn feature representations for different modalities. Based on the observation that cross-modal information has a high semantic meaning we propose a method to effectively exploit this signal. For our method we utilize video data since it is available on a large scale and provides easily accessible modalities given by RGB and optical flow. We demonstrate state-of-the-art performance on highly contested action recognition datasets in the context of self-supervised learning. We also show the transferability of our feature representations and conduct extensive ablation studies to validate our core contributions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Blum, O.; Brattoli, Biagio; Ommer, Björn
X-GAN: Improving Generative Adversarial Networks with ConveX Combinations Conference
German Conference on Pattern Recognition (GCPR) (Oral), Stuttgart, Germany, 2018.
@conference{blum:GCPR:2018,
  title     = {X-GAN: Improving Generative Adversarial Networks with ConveX Combinations},
  author    = {O. Blum and Biagio Brattoli and Björn Ommer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/X-Gan_Improving-Generative-Adversarial-Networks-with-ConveX-Combinations.pdf
https://ommer-lab.com/wp-content/uploads/2021/10/xgan_supplementary.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {German Conference on Pattern Recognition (GCPR) (Oral)},
  address   = {Stuttgart, Germany},
  abstract  = {Even though recent neural architectures for image generation are capable of producing photo-realistic results, the overall distributions of real and faked images still differ a lot. While the lack of a structured latent representation for GANs often results in mode collapse, VAEs enforce a prior to the latent space that leads to an unnatural representation of the underlying real distribution. We introduce a method that preserves the natural structure of the latent manifold. By utilizing neighboring relations within the set of discrete real samples, we reproduce the full continuous latent manifold. We propose a novel image generation network X-GAN that creates latent input vectors from random convex combinations of adjacent real samples. This way we ensure a structured and natural latent space by not requiring prior assumptions. In our experiments, we show that our model outperforms recent approaches in terms of the missing mode problem while maintaining a high image quality.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Esser, Patrick; Haux, Johannes; Milbich, Timo; Ommer, Björn
Towards Learning a Realistic Rendering of Human Behavior Conference
European Conference on Computer Vision (ECCV - HBUGEN), 2018.
@conference{6282,
  title     = {Towards Learning a Realistic Rendering of Human Behavior},
  author    = {Patrick Esser and Johannes Haux and Timo Milbich and Björn Ommer},
  url       = {https://compvis.github.io/hbugen2018/
https://compvis.github.io/hbugen2018/images/rerender.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {European Conference on Computer Vision (ECCV - HBUGEN)},
  abstract  = {Realistic rendering of human behavior is of great interest for applications such as video animations, virtual reality and more generally, gaming engines. Commonly animations of persons performing actions are rendered by articulating explicit 3D models based on sequences of coarse body shape representations simulating a certain behavior. While the simulation of natural behavior can be efficiently learned from common video data, the corresponding 3D models are typically designed in manual, laborious processes or reconstructed from costly (multi-)sensor data. In this work, we present an approach towards a holistic learning framework for rendering human behavior in which all components are learned from easily available data. We utilize motion capture data to generate realistic generations which can be controlled by a user and learn to render characters using only RGB camera data. Our experiments show that we can further improve data efficiency by training on multiple characters at the same time. Overall our approach shows a completely new path towards easily available, personalized avatar creation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Lang, Sabine; Ommer, Björn
Reconstructing Histories: Analyzing Exhibition Photographs with Computational Methods Journal Article
In: Arts, Computational Aesthetics, vol. 7, 64, 2018.
@article{6283,
  title     = {Reconstructing Histories: Analyzing Exhibition Photographs with Computational Methods},
  author    = {Sabine Lang and Björn Ommer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Reconstructing-Histories_Analyzing-Exhibition-Photograhs-wit-Computational-Methods.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  journal   = {Arts, Computational Aesthetics},
  volume    = {7, 64},
  pages     = {1--21},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lang, Sabine; Ommer, Björn
Reflecting on How Artworks Are Processed and Analyzed by Computer Vision Conference
European Conference on Computer Vision (ECCV - VISART), Springer, 2018.
@conference{6284,
  title     = {Reflecting on How Artworks Are Processed and Analyzed by Computer Vision},
  author    = {Sabine Lang and Björn Ommer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Reflecting-on-How-artworks-Are-Processed-and-Analyzed-by-Computer-Vision.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {European Conference on Computer Vision (ECCV - VISART)},
  publisher = {Springer},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Bell, P.; Ommer, Björn
Computer Vision und Kunstgeschichte — Dialog zweier Bildwissenschaften Book Chapter
In: Computing Art Reader: Einführung in die digitale Kunstgeschichte, P. Kuroczyński et al. (ed.), 2018.
@inbook{6287,
  title     = {Computer Vision und Kunstgeschichte — Dialog zweier Bildwissenschaften},
  author    = {P. Bell and Björn Ommer},
  url       = {https://hci.iwr.uni-heidelberg.de/sites/default/files/publications/files/1523349512/413-17-83318-2-10-20181210.pdf},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  booktitle = {Computing Art Reader: Einführung in die digitale Kunstgeschichte, P. Kuroczyński et al. (ed.)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inbook}
}
2017
Ufer, Nikolai; Ommer, Björn
Deep Semantic Feature Matching Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
@conference{6136,
  title     = {Deep Semantic Feature Matching},
  author    = {Nikolai Ufer and Björn Ommer},
  url       = {https://github.com/cl199443/Deep-Semantic-Feature-Matching
https://ommer-lab.com/wp-content/uploads/2021/10/Deep-Semantic-Feature-Matching.pdf},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Brattoli, Biagio; Büchler, Uta; Wahl, Anna-Sophia; Schwab, M. E.; Ommer, Björn
LSTM Self-Supervision for Detailed Behavior Analysis Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), (BB and UB contributed equally), 2017.
@conference{buechler:CVPR:2017,
  title     = {LSTM Self-Supervision for Detailed Behavior Analysis},
  author    = {Biagio Brattoli and Uta Büchler and Anna-Sophia Wahl and M. E. Schwab and Björn Ommer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/LSTM-Self-Supervision-for-Detailed-Behavior-Analysis.pdf},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  publisher = {(BB and UB contributed equally)},
  abstract  = {Behavior analysis provides a crucial non-invasive and easily accessible diagnostic tool for biomedical research. A detailed analysis of posture changes during skilled motor tasks can reveal distinct functional deficits and their restoration during recovery. Our specific scenario is based on a neuroscientific study of rodents recovering from a large sensorimotor cortex stroke and skilled forelimb grasping is being recorded. Given large amounts of unlabeled videos that are recorded during such long-term studies, we seek an approach that captures fine-grained details of posture and its change during rehabilitation without costly manual supervision. Therefore, we utilize self-supervision to automatically learn accurate posture and behavior representations for analyzing motor function. Learning our model depends on the following fundamental elements: (i) limb detection based on a fully convolutional network is initialized solely using motion information, (ii) a novel self-supervised training of LSTMs using only temporal permutation yields a detailed representation of behavior, and (iii) back-propagation of this sequence representation also improves the description of individual postures. We establish a novel test dataset with expert annotations for evaluation of fine-grained behavior analysis. Moreover, we demonstrate the generality of our approach by successfully applying it to self-supervised learning of human posture on two standard benchmark datasets.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
easily accessible diagnostic tool for biomedical research. A detailed analysis of posture changes during skilled motor tasks can reveal distinct functional deficits and their restoration during recovery. Our specific scenario is based on a neuroscientific study of rodents recovering from a large sensorimotor cortex stroke and skilled forelimb grasping is being recorded. Given large amounts of unlabeled videos that are recorded during such long-term studies, we seek an approach that captures fine-grained details of posture and its change during rehabilitation without costly manual supervision. Therefore, we utilize self-supervision to automatically learn accurate posture and behavior representations for analyzing motor function. Learning our model depends on the following fundamental elements: (i) limb detection based on a fully convolutional network is initialized solely using motion information, (ii) a novel self-supervised training of LSTMs using only temporal permutation yields a detailed representation of behavior, and (iii) back-propagation of this sequence representation also improves the description of individual postures. We establish a novel test dataset with expert annotations for evaluation of fine-grained behavior analysis. Moreover, we demonstrate the generality of our approach by successfully applying it to self-supervised learning of human posture on two standard benchmark datasets.
Bautista, Miguel; Fuchs, P.; Ommer, Björn
Learning Where to Drive by Watching Others Conference
Proceedings of the German Conference Pattern Recognition, vol. 1, Springer-Verlag, Basel, 2017.
@conference{6183,
  title     = {Learning Where to Drive by Watching Others},
  author    = {Miguel Bautista and P. Fuchs and Björn Ommer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-Where-to-Drive-by-Watching-Others.pdf
https://ommer-lab.com/wp-content/uploads/2021/10/Supplementary-material_Learning-Where-to-Drive.pdf},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {Proceedings of the German Conference Pattern Recognition},
  volume    = {1},
  publisher = {Springer-Verlag},
  address   = {Basel},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Milbich, Timo; Bautista, Miguel; Sutter, Ekaterina; Ommer, Björn
Unsupervised Video Understanding by Reconciliation of Posture Similarities Conference
Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2017.
@conference{6187,
  title     = {Unsupervised Video Understanding by Reconciliation of Posture Similarities},
  author    = {Timo Milbich and Miguel Bautista and Ekaterina Sutter and Björn Ommer},
  url       = {https://hci.iwr.uni-heidelberg.de/compvis/research/tmilbich_iccv17
https://arxiv.org/abs/1708.01191},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Sümer, Ömer; Dencker, Tobias; Ommer, Björn
Self-supervised Learning of Pose Embeddings from Spatiotemporal Relations in Videos Conference
Proceedings of the IEEE International Conference on Computer Vision (ICCV), 2017.
@conference{6193,
  title     = {Self-supervised Learning of Pose Embeddings from Spatiotemporal Relations in Videos},
  author    = {Ömer Sümer and Tobias Dencker and Björn Ommer},
  url       = {https://arxiv.org/abs/1708.02179
https://ommer-lab.com/wp-content/uploads/2021/10/Supplementary-Material_Self-Supervised-Learning-of-Pose-Embeddings-from-Spatiotemporal-Relations-in-Videos.pdf},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
  abstract  = {Human pose analysis is presently dominated by deep convolutional networks trained with extensive manual annotations of joint locations and beyond. To avoid the need for expensive labeling, we exploit spatiotemporal relations in training videos for self-supervised learning of pose embeddings. The key idea is to combine temporal ordering and spatial placement estimation as auxiliary tasks for learning pose similarities in a Siamese convolutional network. Since the self-supervised sampling of both tasks from natural videos can result in ambiguous and incorrect training labels, our method employs a curriculum learning idea that starts training with the most reliable data samples and gradually increases the difficulty. To further refine the training process we mine repetitive poses in individual videos which provide reliable labels while removing inconsistencies. Our pose embeddings capture visual characteristics of human pose that can boost existing supervised representations in human pose estimation and retrieval. We report quantitative and qualitative results on these tasks in Olympic Sports, Leeds Pose Sports and MPII Human Pose datasets.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Bautista, Miguel; Sanakoyeu, A.; Ommer, Björn
Deep Unsupervised Similarity Learning using Partially Ordered Sets Conference
The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
@conference{6200,
title = {Deep Unsupervised Similarity Learning using Partially Ordered Sets},
author = {Miguel Bautista and A. Sanakoyeu and Björn Ommer},
url = {https://github.com/asanakoy/deep_unsupervised_posets
https://arxiv.org/abs/1704.02268},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Wahl, Anna-Sophia; Büchler, Uta; Brändli, A.; Brattoli, Biagio; Musall, S.; Kasper, H.; Ineichen, B. V.; Helmchen, F.; Ommer, Björn; Schwab, M. E.
Optogenetically stimulating the intact corticospinal tract post-stroke restores motor control through regionalized functional circuit formation Journal Article
In: Nature Communications, 2017.
@article{wahl:NatComm:2017,
title = {Optogenetically stimulating the intact corticospinal tract post-stroke restores motor control through regionalized functional circuit formation},
author = {Anna-Sophia Wahl and Uta Büchler and A. Brändli and Biagio Brattoli and S. Musall and H. Kasper and B. V. Ineichen and F. Helmchen and Björn Ommer and M. E. Schwab},
url = {https://www.nature.com/articles/s41467-017-01090-6},
doi = {10.1038/s41467-017-01090-6},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
journal = {Nature Communications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2016
Bautista, Miguel; Sanakoyeu, A.; Sutter, E.; Ommer, Björn
CliqueCNN: Deep Unsupervised Exemplar Learning Conference
Proceedings of the Conference on Advances in Neural Information Processing Systems (NIPS), MIT Press, Barcelona, 2016.
@conference{arXiv:1608.08792,
title = {CliqueCNN: Deep Unsupervised Exemplar Learning},
author = {Miguel Bautista and A. Sanakoyeu and E. Sutter and Björn Ommer},
url = {https://github.com/asanakoy/cliquecnn
https://arxiv.org/abs/1608.08792},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {Proceedings of the Conference on Advances in Neural Information Processing Systems (NIPS)},
publisher = {MIT Press},
address = {Barcelona},
abstract = {Exemplar learning is a powerful paradigm for discovering visual similarities in an unsupervised manner. In this context, however, the recent breakthrough in deep learning could not yet unfold its full potential. With only a single positive sample, a great imbalance between one positive and many negatives, and unreliable relationships between most samples, training of Convolutional Neural networks is impaired. Given weak estimates of local distance we propose a single optimization problem to extract batches of samples with mutually consistent relations. Conflicting relations are distributed over different batches and similar samples are grouped into compact cliques. Learning exemplar similarities is framed as a sequence of clique categorization tasks. The CNN then consolidates transitivity relations within and between cliques and learns a single representation for all samples without the need for labels. The proposed unsupervised approach has shown competitive performance on detailed posture analysis and object classification.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Bell, P.; Ommer, Björn
Digital Connoisseur? How Computer Vision Supports Art History Book Chapter
In: Connoisseurship nel XXI secolo. Approcci, Limiti, Prospettive, A. Aggujaro & S. Albl (ed.), Artemide, Rome, 2016.
@inbook{6109,
title = {Digital Connoisseur? How Computer Vision Supports Art History},
author = {P. Bell and Björn Ommer},
url = {https://www.academia.edu/39021895/Digital_Connoisseur_How_Computer_Vision_Supports_Art_History},
year = {2016},
date = {2016-01-01},
urldate = {2016-01-01},
booktitle = {Connoisseurship nel XXI secolo. Approcci, Limiti, Prospettive, A. Aggujaro & S. Albl (ed.)},
publisher = {Artemide},
address = {Rome},
organization = {Artemide},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Bell, Peter; Ommer, Björn
Ramboux und der Computer. Experimente zum automatischen Sehen mit Kopien des Nazareners Proceedings Article
In: Ulf Sölter (Hrsg.): Italien so nah – Johann Anton Ramboux (1790-1866), Wienand Verlag, Köln, 2016.
@inproceedings{bell:Ramboux:2016,
title = {Ramboux und der Computer. Experimente zum automatischen Sehen mit Kopien des Nazareners},
author = {Peter Bell and Björn Ommer},
year = {2016},
date = {2016-01-01},
booktitle = {Ulf Sölter (Hrsg.): Italien so nah – Johann Anton Ramboux (1790-1866)},
note = {Ausstellungskatalog},
publisher = {Wienand Verlag},
address = {Köln},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Antic, B.; Büchler, Uta; Wahl, Anna-Sophia; Schwab, M. E.; Ommer, Björn
Spatiotemporal Parsing of Motor Kinematics for Assessing Stroke Recovery Conference
Medical Image Computing and Computer-Assisted Intervention, Springer, 2015.
@conference{antic:MICCAI:2015,
title = {Spatiotemporal Parsing of Motor Kinematics for Assessing Stroke Recovery},
author = {B. Antic and Uta Büchler and Anna-Sophia Wahl and M. E. Schwab and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Spatiotemporal-Parsing-of-Motor-Kinemtaics-for-Assessing-Stroke-Recovery.pdf},
doi = {10.1007/978-3-319-24574-4_56},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Medical Image Computing and Computer-Assisted Intervention},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Rubio, J. C.; Ommer, Björn
Regularizing Max-Margin Exemplars by Reconstruction and Generative Models Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, IEEE, 2015.
@conference{rubio:CVPR:2015,
title = {Regularizing Max-Margin Exemplars by Reconstruction and Generative Models},
author = {J. C. Rubio and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Regularizing-Max-Margin-Exemplars-by-Reconstruction-and-Generative-Models.pdf},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages = {4213--4221},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Rubio, J. C.; Eigenstetter, A.; Ommer, Björn
Generative Regularization with Latent Topics for Discriminative Object Recognition Journal Article
In: Pattern Recognition, vol. 48, no. 12, pp. 3871–3880, 2015.
@article{rubio:PR:2015,
title = {Generative Regularization with Latent Topics for Discriminative Object Recognition},
author = {J. C. Rubio and A. Eigenstetter and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Generative-Regularization-with-Latent-Topics-for-Discriminative-Object-Recognition.pdf},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {Pattern Recognition},
volume = {48},
number = {12},
pages = {3871--3880},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yarlagadda, P.; Ommer, Björn
Beyond the Sum of Parts: Voting with Groups of Dependent Entities Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 37, no. 6, pp. 1134–1147, 2015.
@article{yarlagadda:PAMI:2015,
title = {Beyond the Sum of Parts: Voting with Groups of Dependent Entities},
author = {P. Yarlagadda and Björn Ommer},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6926849
https://www.researchgate.net/publication/276073488_Beyond_the_Sum_of_Parts_Voting_with_Groups_of_Dependent_Entities},
doi = {10.1109/TPAMI.2014.2363456},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {37},
number = {6},
pages = {1134--1147},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bell, P.; Ommer, Björn
Training Argus Journal Article
In: Kunstchronik. Monatsschrift für Kunstwissenschaft, Museumswesen und Denkmalpflege, vol. 68, no. 8, pp. 414–420, 2015.
@article{bell:KunstChr:2015,
title = {Training Argus},
author = {P. Bell and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Training_Argus.pdf},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {Kunstchronik. Monatsschrift für Kunstwissenschaft, Museumswesen und Denkmalpflege},
volume = {68},
number = {8},
pages = {414--420},
publisher = {Zentralinstitut für Kunstgeschichte},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Antic, B.; Ommer, Björn
Per-Sample Kernel Adaptation for Visual Recognition and Grouping Conference
Proceedings of the IEEE International Conference on Computer Vision, IEEE, 2015.
@conference{antic:ICCV:2015,
title = {Per-Sample Kernel Adaptation for Visual Recognition and Grouping},
author = {B. Antic and Björn Ommer},
url = {https://ieeexplore.ieee.org/document/7410505
https://ommer-lab.com/wp-content/uploads/2021/10/Per-Sample-Kernel-Adaptation-for-Visual-Recognition-and-Grouping.pdf},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Antic, B.; Ommer, Björn
Spatio-temporal Video Parsing for Abnormality Detection Journal Article
In: arXiv, vol. abs/1502.06235, 2015.
@article{antic:arXiv:2015,
title = {Spatio-temporal Video Parsing for Abnormality Detection},
author = {B. Antic and Björn Ommer},
url = {http://arxiv.org/abs/1502.06235
https://arxiv.org/pdf/1502.06235.pdf},
year = {2015},
date = {2015-01-01},
urldate = {2015-01-01},
journal = {arXiv},
volume = {abs/1502.06235},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2014
Monroy, A.; Bell, P.; Ommer, Björn
Morphological Analysis for Investigating Artistic Images Journal Article
In: Image and Vision Computing, vol. 32, no. 6, pp. 414-423, 2014.
@article{monoroy:IVC:2014,
title = {Morphological Analysis for Investigating Artistic Images},
author = {A. Monroy and P. Bell and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
https://ommer-lab.com/wp-content/uploads/2021/10/Morphological-Analysis-for-investigating-artistic-images.pdf},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Image and Vision Computing},
volume = {32},
number = {6},
pages = {414--423},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Takami, M.; Bell, P.; Ommer, Björn
An Approach to Large Scale Interactive Retrieval of Cultural Heritage Conference
Eurographics Workshop on Graphics and Cultural Heritage, The Eurographics Association, 2014.
@conference{takami:GCH:2014,
title = {An Approach to Large Scale Interactive Retrieval of Cultural Heritage},
author = {M. Takami and P. Bell and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
https://ommer-lab.com/wp-content/uploads/2021/10/An-Approach-to-Large-Scale-Interactive-Retrieval-of-Cultural-Heritage.pdf},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Eurographics Workshop on Graphics and Cultural Heritage},
publisher = {The Eurographics Association},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Takami, M.; Bell, P.; Ommer, Björn
Offline Learning of Prototypical Negatives for Efficient Online Exemplar SVM Conference
Winter Conference on Applications of Computer Vision, IEEE, 2014.
@conference{takami:WACV:2014,
title = {Offline Learning of Prototypical Negatives for Efficient Online Exemplar SVM},
author = {M. Takami and P. Bell and Björn Ommer},
url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6836075},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Winter Conference on Applications of Computer Vision},
pages = {377--384},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Kandemir, M.; Rubio, J. C.; Schmidt, U.; Wojek, C.; Welbl, J.; Ommer, Björn; Hamprecht, Fred A.
Event Detection by Feature Unpredictability in Phase-Contrast Videos of Cell Cultures Conference
Medical Image Computing and Computer-Assisted Intervention, Springer, 2014.
@conference{kandemir:MICCAI:2014,
title = {Event Detection by Feature Unpredictability in Phase-Contrast Videos of Cell Cultures},
author = {M. Kandemir and J. C. Rubio and U. Schmidt and C. Wojek and J. Welbl and Björn Ommer and Fred A. Hamprecht},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Event-Detection-by-Feature-Unpredictability-in-Phase-Contrast-Videos.pdf},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Medical Image Computing and Computer-Assisted Intervention},
pages = {154--161},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Wahl, Anna-Sophia; Omlor, W.; Rubio, J. C.; Chen, J. L.; Zheng, H.; Schröter, A.; Gullo, M.; Weinmann, O.; Kobayashi, K.; Helmchen, F.; Ommer, Björn; Schwab, M. E.
Asynchronous Therapy Restores Motor Control by Rewiring of the Rat Corticospinal Tract after Stroke Journal Article
In: Science, vol. 344, no. 6189, pp. 1250–1255, 2014.
@article{Wahl:Science:2014,
title = {Asynchronous Therapy Restores Motor Control by Rewiring of the Rat Corticospinal Tract after Stroke},
author = {Anna-Sophia Wahl and W. Omlor and J. C. Rubio and J. L. Chen and H. Zheng and A. Schröter and M. Gullo and O. Weinmann and K. Kobayashi and F. Helmchen and Björn Ommer and M. E. Schwab},
url = {https://www.science.org/doi/abs/10.1126/science.1253050},
doi = {10.1126/science.1253050},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
journal = {Science},
volume = {344},
number = {6189},
pages = {1250--1255},
publisher = {American Association for the Advancement of Science},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Antic, B.; Ommer, Björn
Learning Latent Constituents for Recognition of Group Activities in Video Conference
Proceedings of the European Conference on Computer Vision (ECCV) (Oral), Springer, 2014.
@conference{antic:ECCV:2014,
title = {Learning Latent Constituents for Recognition of Group Activities in Video},
author = {B. Antic and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-Latent-Constituents-for-Recognition-of-Group-Activities-in-Video.pdf},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) (Oral)},
pages = {33--47},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Eigenstetter, A.; Takami, M.; Ommer, Björn
Randomized Max-Margin Compositions for Visual Recognition Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, IEEE, 2014.
@conference{eigenstetter:CVPR:2014,
title = {Randomized Max-Margin Compositions for Visual Recognition},
author = {A. Eigenstetter and M. Takami and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Randomized-Max-Margin-Compositions-for-Visual-Recognition.pdf},
year = {2014},
date = {2014-01-01},
urldate = {2014-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages = {3590--3597},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2013
Antic, B.; Milbich, Timo; Ommer, Björn
Less is More: Video Trimming for Action Recognition Conference
Proceedings of the IEEE International Conference on Computer Vision, Workshop on Understanding Human Activities: Context and Interaction, IEEE, 2013.
@conference{antic:HACI:2013,
title = {Less is More: Video Trimming for Action Recognition},
author = {B. Antic and Timo Milbich and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Less-is-More_Video-Trimming-for-Action-Recognition.pdf},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision, Workshop on Understanding Human Activities: Context and Interaction},
pages = {515--521},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Arnold, M.; Bell, P.; Ommer, Björn
Automated Learning of Self-Similarity and Informative Structures in Architecture Conference
Scientific Computing & Cultural Heritage, 2013.
@conference{arnold:SCCH:2013,
title = {Automated Learning of Self-Similarity and Informative Structures in Architecture},
author = {M. Arnold and P. Bell and Björn Ommer},
year = {2013},
date = {2013-01-01},
booktitle = {Scientific Computing & Cultural Heritage},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Bell, P.; Ommer, Björn
A Morphometric Approach to Reception Analysis of Premodern Art Conference
Scientific Computing & Cultural Heritage, 2013.
@conference{monroy:SCCH:2013,
title = {A Morphometric Approach to Reception Analysis of Premodern Art},
author = {A. Monroy and P. Bell and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/A-Morphometric-Approach-to-Reception-Analysis-of-Premodern-Art.pdf},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {Scientific Computing & Cultural Heritage},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ommer, Björn
The Role of Shape in Visual Recognition Book Chapter
In: Shape Perception in Human Computer Vision: An Interdisciplinary Perspective, pp. 373–385, Springer, 2013.
@inbook{ommer:SPHCV:2013,
title = {The Role of Shape in Visual Recognition},
author = {Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/The-Role-of-Shape-in-Visual-Recognition.pdf},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {Shape Perception in Human Computer Vision: An Interdisciplinary Perspective},
pages = {373--385},
publisher = {Springer},
organization = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Bell, P.; Schlecht, J.; Ommer, Björn
Nonverbal Communication in Medieval Illustrations Revisited by Computer Vision and Art History Journal Article
In: Visual Resources Journal, Special Issue on Digital Art History, vol. 29, no. 1-2, pp. 26–37, 2013.
@article{bell:VR:2013,
title = {Nonverbal Communication in Medieval Illustrations Revisited by Computer Vision and Art History},
author = {P. Bell and J. Schlecht and Björn Ommer},
url = {http://www.tandfonline.com/doi/abs/10.1080/01973762.2013.761111
https://ommer-lab.com/wp-content/uploads/2021/10/Nonverbal_Communication_in_Medieval_Illu.pdf},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
journal = {Visual Resources Journal, Special Issue on Digital Art History},
volume = {29},
number = {1-2},
pages = {26--37},
publisher = {Taylor & Francis},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Garbe, Christoph S.; Ommer, Björn
Parameter Estimation in Image Processing and Computer Vision Book Chapter
In: Model Based Parameter Estimation: Theory and Applications, pp. 311–334, Springer, 2013, ISBN: 978-3-642-30366-1.
@inbook{garbe:MBPE:2013,
title = {Parameter Estimation in Image Processing and Computer Vision},
author = {Christoph S. Garbe and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Parameter-Estimation-in-Image-Processing-and-Computer-Vision.pdf},
isbn = {978-3-642-30366-1},
year = {2013},
date = {2013-01-01},
urldate = {2013-01-01},
booktitle = {Model Based Parameter Estimation: Theory and Applications},
pages = {311--334},
publisher = {Springer},
organization = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Yarlagadda, P.; Monroy, A.; Carque, Bernd; Ommer, Björn
Towards a Computer-based Understanding of Medieval Images Book Chapter
In: Scientific Computing & Cultural Heritage, pp. 89–97, Springer, 2013, ISBN: 978-3-642-28020-7.
@inbook{yarlagadda:SCCH:2013,
title = {Towards a Computer-based Understanding of Medieval Images},
author = {P. Yarlagadda and A. Monroy and Bernd Carque and Björn Ommer},
url = {http://link.springer.com/chapter/10.1007/978-3-642-28021-4_10},
isbn = {978-3-642-28020-7},
year = {2013},
date = {2013-01-01},
booktitle = {Scientific Computing & Cultural Heritage},
pages = {89--97},
publisher = {Springer},
organization = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
2012
Eigenstetter, A.; Ommer, Björn
Visual Recognition using Embedded Feature Selection for Curvature Self-Similarity Conference
Proceedings of the Conference on Advances in Neural Information Processing Systems, MIT Press, 2012.
@conference{eigenstetter:NIPS:2012,
title = {Visual Recognition using Embedded Feature Selection for Curvature Self-Similarity},
author = {A. Eigenstetter and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Visual-Recognition-using-Embedded-Feature-Selection.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {Proceedings of the Conference on Advances in Neural Information Processing Systems},
pages = {377--385},
publisher = {MIT Press},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Yarlagadda, P.; Ommer, Björn
From Meaningful Contours to Discriminative Object Shape Conference
Proceedings of the European Conference on Computer Vision, vol. 7572, Springer, 2012.
@conference{yarlagadda:ECCV:2012,
title = {From Meaningful Contours to Discriminative Object Shape},
author = {P. Yarlagadda and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/From-Meaningful-Contours-to-Discriminative-Object-Shape.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision},
volume = {7572},
pages = {766--779},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Bell, P.; Ommer, Björn
Shaping Art with Art: Morphological Analysis for Investigating Artistic Reproductions Conference
Proceedings of the European Conference on Computer Vision, Workshop on VISART, vol. 7583, Springer, 2012.
@conference{monroy:ECCV:2012,
title = {Shaping Art with Art: Morphological Analysis for Investigating Artistic Reproductions},
author = {A. Monroy and P. Bell and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
https://ommer-lab.com/wp-content/uploads/2021/10/Shaping-Art-with-Art_Morphological-Analysis-for-Investigating-Artistic-Reproductions.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision, Workshop on VISART},
volume = {7583},
pages = {571--580},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Eigenstetter, A.; Yarlagadda, P.; Ommer, Björn
Max-Margin Regularization for Reducing Accidentalness in Chamfer Matching Conference
Proceedings of the Asian Conference on Computer Vision, Springer, 2012.
@conference{eigenstetter:ACCV:2012,
title = {Max-Margin Regularization for Reducing Accidentalness in Chamfer Matching},
author = {A. Eigenstetter and P. Yarlagadda and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Max-margin-Regularization-for-Reducing-Accidentalness-in-Chamfer-Matching.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {Proceedings of the Asian Conference on Computer Vision},
pages = {152--163},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Antic, B.; Ommer, Björn
Robust Multiple-Instance Learning with Superbags Conference
Proceedings of the Asian Conference on Computer Vision (ACCV) (Oral), Springer, 2012.
@conference{antic:ACCV:2012,
title = {Robust Multiple-Instance Learning with Superbags},
author = {B. Antic and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Robust-Multiple-Instance-Learning-with-Superbags.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {Proceedings of the Asian Conference on Computer Vision (ACCV) (Oral)},
pages = {242--255},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Yarlagadda, P.; Eigenstetter, A.; Ommer, Björn
Learning Discriminative Chamfer Regularization Conference
BMVC, Springer, 2012.
@conference{yarlagadda:BMVC:2012,
title = {Learning Discriminative Chamfer Regularization},
author = {P. Yarlagadda and A. Eigenstetter and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/content/max-margin-regularization-chamfer-matching
http://www.bmva.org/bmvc/2012/BMVC/paper020/paper020.pdf},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
booktitle = {BMVC},
pages = {1--11},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Ommer, Björn
Beyond Bounding-Boxes: Learning Object Shape by Model-driven Grouping Proceedings
Springer, vol. 7574, 2012.
@proceedings{monroy:ECCV:2012b,
title = {Beyond Bounding-Boxes: Learning Object Shape by Model-driven Grouping},
author = {A. Monroy and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Beyond-Bounding-Boxes_Learning-Object-Shape-by-Model-Driven-Grouping.pdf},
doi = {10.1007/978-3-642-33712-3_42},
year = {2012},
date = {2012-01-01},
urldate = {2012-01-01},
volume = {7574},
pages = {582--595},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {proceedings}
}
2011
Antic, B.; Ommer, Björn
Video Parsing for Abnormality Detection Conference
Proceedings of the IEEE International Conference on Computer Vision, IEEE, 2011.
@conference{antic:ICCV:2011,
title = {Video Parsing for Abnormality Detection},
author = {B. Antic and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/content/video-parsing-abnormality-detection
https://ommer-lab.com/wp-content/uploads/2021/10/Video-Parsing-for-Abnormality-Detection.pdf},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
pages = {2415--2422},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Schlecht, J.; Ommer, Björn
Contour-based Object Detection Conference
BMVC, 2011.
@conference{schlecht:BMVC:2011,
title = {Contour-based Object Detection},
author = {J. Schlecht and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/content/contour-based-object-detection
https://ommer-lab.com/wp-content/uploads/2021/10/Contour-based-Object-Detection.pdf},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {BMVC},
pages = {1--9},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Schlecht, J.; Carque, Bernd; Ommer, Björn
Detecting Gestures in Medieval Images Conference
Proceedings of the International Conference on Image Processing, IEEE, 2011.
@conference{schlecht:ICIP:2011,
title = {Detecting Gestures in Medieval Images},
author = {J. Schlecht and Bernd Carque and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
https://ommer-lab.com/wp-content/uploads/2021/10/Detecting-Gestures-in-Medieval-Images.pdf},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Proceedings of the International Conference on Image Processing},
pages = {1309--1312},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Carque, Bernd; Ommer, Björn
Reconstructing the Drawing Process of Reproductions from Medieval Images Conference
Proceedings of the International Conference on Image Processing, IEEE, 2011.
@conference{monroy:ICIP:2011,
title = {Reconstructing the Drawing Process of Reproductions from Medieval Images},
author = {A. Monroy and Bernd Carque and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/content/reconstructing-drawing-process-reproductions-medieval-images
https://ommer-lab.com/wp-content/uploads/2021/10/Reconstructing-the-Drawing-Process-of-Reproductions-from-Medieval-Images.pdf},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Proceedings of the International Conference on Image Processing},
pages = {2974--2977},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Kröger, Thorben; Arnold, M.; Ommer, Björn
Parametric Object Detection for Iconographic Analysis Conference
Scientific Computing & Cultural Heritage, 2011.
@conference{monroy:SCCH:2011,
title = {Parametric Object Detection for Iconographic Analysis},
author = {A. Monroy and Thorben Kröger and M. Arnold and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/compvis/projects/digihum
http://www.academia.edu/9439693/Parametric_Object_Detection_for_Iconographic_Analysis},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Scientific Computing & Cultural Heritage},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Yarlagadda, P.; Monroy, A.; Carque, Bernd; Ommer, Björn
Top-down Analysis of Low-level Object Relatedness Leading to Semantic Understanding of Medieval Image Collections Conference
Conference on Computer Vision and Image Analysis of Art II, vol. 7869, 2011.
@conference{yarlagadda:_:2011,
title = {Top-down Analysis of Low-level Object Relatedness Leading to Semantic Understanding of Medieval Image Collections},
author = {P. Yarlagadda and A. Monroy and Bernd Carque and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Top-Down-Analysis-of-Low-Level-Object-Relatedness-Leading-to-Semantic-Understanding-of-Medieval-Image-Collections.pdf},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {Conference on Computer Vision and Image Analysis of Art II},
volume = {7869},
pages = {61--69},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Monroy, A.; Eigenstetter, A.; Ommer, Björn
Beyond Straight Lines - Object Detection using Curvature Conference
International Conference on Image Processing (ICIP), IEEE, 2011.
@conference{monroy:ICIP:2011b,
title = {Beyond Straight Lines - Object Detection using Curvature},
author = {A. Monroy and A. Eigenstetter and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Beyond-Straight-Lines_Object-Detection-Using-Curvature.pdf},
doi = {10.1109/ICIP.2011.6116485},
year = {2011},
date = {2011-01-01},
urldate = {2011-01-01},
booktitle = {International Conference on Image Processing (ICIP)},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2010
Yarlagadda, P.; Monroy, A.; Ommer, Björn
Voting by Grouping Dependent Parts Conference
Proceedings of the European Conference on Computer Vision, vol. 6315, Springer, 2010.
@conference{yarlagadda:ECCV:2010,
  author    = {P. Yarlagadda and A. Monroy and Björn Ommer},
  title     = {Voting by Grouping Dependent Parts},
  booktitle = {Proceedings of the European Conference on Computer Vision},
  volume    = {6315},
  pages     = {197--210},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Voting-by-Grouping-Dependent-Parts.pdf},
  year      = {2010},
  date      = {2010-01-01},
  urldate   = {2010-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Ommer, Björn; Buhmann, J. M.
Learning the Compositional Nature of Visual Object Categories for Recognition Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 32, no. 3, pp. 501–516, 2010.
@article{ommer:PAMI:2010,
  author    = {Björn Ommer and J. M. Buhmann},
  title     = {Learning the Compositional Nature of Visual Object Categories for Recognition},
  journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  volume    = {32},
  number    = {3},
  pages     = {501--516},
  publisher = {IEEE},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-the-Compositional-Nature-of-Visual-Object-Categories-for-Recognition.pdf},
  year      = {2010},
  date      = {2010-01-01},
  urldate   = {2010-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Wagner, J.; Ommer, Björn
Efficiently Clustering Earth Mover's Distance Conference
Proceedings of the Asian Conference on Computer Vision, Springer, 2010.
@conference{wagner:ACCV:2010,
  author    = {J. Wagner and Björn Ommer},
  title     = {Efficiently Clustering Earth Mover's Distance},
  booktitle = {Proceedings of the Asian Conference on Computer Vision},
  pages     = {477--488},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Efficiently-Clustering-Earth-Movers-Distance.pdf},
  year      = {2010},
  date      = {2010-01-01},
  urldate   = {2010-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Yarlagadda, P.; Monroy, A.; Carque, Bernd; Ommer, Björn
Recognition and Analysis of Objects in Medieval Images Conference
Proceedings of the Asian Conference on Computer Vision, Workshop on e-Heritage, Springer, 2010.
@conference{yarlagadda:ACCV:2010,
  author    = {P. Yarlagadda and A. Monroy and Bernd Carque and Björn Ommer},
  title     = {Recognition and Analysis of Objects in Medieval Images},
  booktitle = {Proceedings of the Asian Conference on Computer Vision, Workshop on e-Heritage},
  pages     = {296--305},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Recognition-and-Analysis-of-Objects-in-Medieval-Images.pdf},
  year      = {2010},
  date      = {2010-01-01},
  urldate   = {2010-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2009
Ommer, Björn; Mader, T.; Buhmann, J. M.
Seeing the Objects Behind the Dots: Recognition in Videos from a Moving Camera Journal Article
In: International Journal of Computer Vision, vol. 83, no. 1, pp. 57–71, 2009.
@article{ommer:IJCV:2009,
  author    = {Björn Ommer and T. Mader and J. M. Buhmann},
  title     = {Seeing the Objects Behind the Dots: Recognition in Videos from a Moving Camera},
  journal   = {International Journal of Computer Vision},
  volume    = {83},
  number    = {1},
  pages     = {57--71},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Seeing-the-Objects-Behind-the-Dots_Recognition-in-Videos-from-a-Moving-Camera.pdf},
  year      = {2009},
  date      = {2009-01-01},
  urldate   = {2009-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ommer, Björn; Malik, J.
Multi-scale Object Detection by Clustering Lines Conference
Proceedings of the IEEE International Conference on Computer Vision, IEEE, 2009.
@conference{ommer:ICCV:2009,
  author    = {Björn Ommer and J. Malik},
  title     = {Multi-scale Object Detection by Clustering Lines},
  booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
  pages     = {484--491},
  publisher = {IEEE},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Multi-Scale-Object-Detection-by-Clustering-Lines.pdf},
  year      = {2009},
  date      = {2009-01-01},
  urldate   = {2009-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Yarlagadda, P.; Monroy, A.; Carque, Bernd; Ommer, Björn
Towards a Computer-based Understanding of Medieval Images Conference
Scientific Computing & Cultural Heritage, Springer, 2009.
% NOTE(review): escaped the raw "&" in booktitle as "\&" -- an unescaped "&" is the
% LaTeX alignment character and breaks compilation under classic BibTeX styles.
% The two newline-separated URLs in one url field follow this file's existing convention.
@conference{yarlagadda:SCCH:2009,
title = {Towards a Computer-based Understanding of Medieval Images},
author = {P. Yarlagadda and A. Monroy and Bernd Carque and Björn Ommer},
url = {https://hci.iwr.uni-heidelberg.de/content/visual-object-recognition-datasets-pre-modern-images
https://link.springer.com/chapter/10.1007/978-3-642-28021-4_10},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {Scientific Computing \& Cultural Heritage},
pages = {89--97},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Keränen, S. V. E.; DePace, A.; Luengo Hendriks, C. L.; Fowlkes, C.; Arbelaez, P.; Ommer, Björn; Brox, T.; Henriquez, C.; Wunderlich, Z.; Eckenrode, K.; Fischer, B.; Hammonds, A.; Celniker, S. E.
Computational Analysis of Quantitative Changes in Gene Expression and Embryo Morphology between Species Conference
Evolution-The Molecular Landscape, 2009.
% NOTE(review): braced the compound surname {Luengo Hendriks} -- in BibTeX's
% "First Last" name form the unbraced "C. L. Luengo Hendriks" is parsed with
% last name "Hendriks" only, which mis-sorts and mis-abbreviates the name.
@conference{keraenen:2009,
title = {Computational Analysis of Quantitative Changes in Gene Expression and Embryo Morphology between Species},
author = {S. V. E. Keränen and A. DePace and C. L. {Luengo Hendriks} and C. Fowlkes and P. Arbelaez and Björn Ommer and T. Brox and C. Henriquez and Z. Wunderlich and K. Eckenrode and B. Fischer and A. Hammonds and S. E. Celniker},
year = {2009},
date = {2009-01-01},
urldate = {2009-01-01},
booktitle = {Evolution-The Molecular Landscape},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2008
Ommer, Björn
Seeing the Objects Behind the Parts: Learning Compositional Models for Visual Recognition Book
VDM Verlag, 2008, ISBN: 978-3-639-02144-8.
% NOTE(review): dropped "organization" -- it duplicated "publisher" byte-for-byte
% and is not a standard @book field (it belongs to @manual/@proceedings).
% NOTE(review): the url contains raw "&" characters; these are safe with url-aware
% styles (\url{...}) but would need escaping for plain classic-BibTeX styles -- confirm
% the consuming toolchain before changing.
@book{ommer:VDM:2008,
title = {Seeing the Objects Behind the Parts: Learning Compositional Models for Visual Recognition},
author = {Björn Ommer},
url = {http://www.amazon.com/Seeing-Objects-Behind-Parts-Compositional/dp/3639021444/ref=sr_1_1?ie=UTF8&s=books&qid=1232659136&sr=1-1},
isbn = {978-3-639-02144-8},
year = {2008},
date = {2008-01-01},
urldate = {2008-01-01},
publisher = {VDM Verlag},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
2007
Ommer, Björn; Buhmann, J. M.
Compositional Object Recognition, Segmentation, and Tracking in Video Conference
Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition, vol. 4679, Springer, 2007.
@conference{ommer:EMMCVPR:2007,
  author    = {Björn Ommer and J. M. Buhmann},
  title     = {Compositional Object Recognition, Segmentation, and Tracking in Video},
  booktitle = {Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition},
  volume    = {4679},
  pages     = {318--333},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Compositional-Object-Recognition_Segmentation_and-Tracking-in-Video.pdf},
  year      = {2007},
  date      = {2007-01-01},
  urldate   = {2007-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Sigg, C.; Fischer, B.; Ommer, Björn; Roth, V.; Buhmann, J. M.
Nonnegative CCA for Audiovisual Source Separation Conference
International Workshop on Machine Learning for Signal Processing, IEEE, 2007.
% NOTE(review): braced the acronym {CCA} in the title so sentence-casing
% bibliography styles cannot lowercase it to "cca".
@conference{sigg:MLSP:2007,
title = {Nonnegative {CCA} for Audiovisual Source Separation},
author = {C. Sigg and B. Fischer and Björn Ommer and V. Roth and J. M. Buhmann},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Nonnegative-CCA-for-Audiovisual-Source-Separation.pdf},
year = {2007},
date = {2007-01-01},
urldate = {2007-01-01},
booktitle = {International Workshop on Machine Learning for Signal Processing},
pages = {253--258},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ommer, Björn; Buhmann, J. M.
Learning the Compositional Nature of Visual Objects Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, IEEE, 2007.
@conference{ommer:CVPR:2007,
  author    = {Björn Ommer and J. M. Buhmann},
  title     = {Learning the Compositional Nature of Visual Objects},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
  pages     = {1--8},
  publisher = {IEEE},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-the-Compositional-Nature-of-Visual-Objects.pdf},
  year      = {2007},
  date      = {2007-01-01},
  urldate   = {2007-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2006
Roth, V.; Ommer, Björn
Exploiting Low-level Image Segmentation for Object Recognition Conference
Pattern Recognition, Symposium of the DAGM, vol. 4174, Springer, 2006.
% NOTE(review): dropped "organization = {Springer}" -- it duplicated "publisher"
% and produced the doubled "Springer Springer" in the rendered citation.
@conference{roth:DAGM:2006,
title = {Exploiting Low-level Image Segmentation for Object Recognition},
author = {V. Roth and Björn Ommer},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Exploiting-Low-level-Image-Segmentation-for-Object-Recognition.pdf},
year = {2006},
date = {2006-01-01},
urldate = {2006-01-01},
booktitle = {Pattern Recognition, Symposium of the DAGM},
volume = {4174},
pages = {11--20},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ommer, Björn; Sauter, M.; Buhmann, J. M.
Learning Top-Down Grouping of Compositional Hierarchies for Recognition Conference
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Workshop on Perceptual Organization in Computer Vision, IEEE, 2006.
% NOTE(review): fixed malformed author "Buhmann J. M." (parsed by BibTeX as
% first name "Buhmann J.", last name "M.") to "J. M. Buhmann", the form used
% for the same author throughout this file (cf. ommer:CVPR:2007).
@conference{ommer:CVPR:2006,
title = {Learning Top-Down Grouping of Compositional Hierarchies for Recognition},
author = {Björn Ommer and M. Sauter and J. M. Buhmann},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-Top-Down-Grouping-of-Compositional-Hierarchies-for-Recognition.pdf},
year = {2006},
date = {2006-01-01},
urldate = {2006-01-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Workshop on Perceptual Organization in Computer Vision},
pages = {194--194},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Ommer, Björn; Buhmann, J. M.
Learning Compositional Categorization Models Conference
Proceedings of the European Conference on Computer Vision, vol. 3953, Springer, 2006.
% NOTE(review): dropped "organization = {Springer}" -- it duplicated "publisher"
% and produced the doubled "Springer Springer" in the rendered citation.
@conference{ommer:ECCV:2006,
title = {Learning Compositional Categorization Models},
author = {Björn Ommer and J. M. Buhmann},
url = {https://ommer-lab.com/wp-content/uploads/2021/10/Learning-Compositional-Categorization-Models.pdf},
year = {2006},
date = {2006-01-01},
urldate = {2006-01-01},
booktitle = {Proceedings of the European Conference on Computer Vision},
volume = {3953},
pages = {316--329},
publisher = {Springer},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2005
Ommer, Björn; Buhmann, J. M.
Object Categorization by Compositional Graphical Models Conference
Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition, vol. 3757, Springer, 2005.
@conference{ommer:EMMCVPR:2005,
  author    = {Björn Ommer and J. M. Buhmann},
  title     = {Object Categorization by Compositional Graphical Models},
  booktitle = {Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition},
  volume    = {3757},
  pages     = {235--250},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/Object-Categorization-by-Compositional-Graphical-Models.pdf},
  year      = {2005},
  date      = {2005-01-01},
  urldate   = {2005-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2003
Ommer, Björn; Buhmann, J. M.
A Compositionality Architecture for Perceptual Feature Grouping Conference
Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition, vol. 2683, Springer, 2003.
@conference{ommer:EMMCVPR:2003,
  author    = {Björn Ommer and J. M. Buhmann},
  title     = {A Compositionality Architecture for Perceptual Feature Grouping},
  booktitle = {Proceedings of the International Workshop on Energy Minimization Methods in Computer Vision and Pattern Recognition},
  volume    = {2683},
  pages     = {275--290},
  publisher = {Springer},
  url       = {https://ommer-lab.com/wp-content/uploads/2021/10/A-Compositionality-Architecture-for-Perceptual-Feature-Grouping.pdf},
  year      = {2003},
  date      = {2003-01-01},
  urldate   = {2003-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}