@inproceedings{546fc2be71254e0bbab0dc09495459c0,
  % Title re-entered in Title Case (was ALL CAPS, which loses casing info the
  % style cannot recover); key left unchanged so existing \cite commands still work.
  title     = {Occlusion-Invariant Representation Alignment for Entity Re-Identification},
  author    = {Zhanghao Jiang and Ke Xu and Heshan Du and Huan Jin and Zheng Lu and Qian Zhang},
  booktitle = {2022 IEEE International Conference on Image Processing, ICIP 2022 - Proceedings},
  series    = {Proceedings - International Conference on Image Processing, ICIP},
  publisher = {IEEE Computer Society},
  % address is the publisher's location, not the conference venue.
  address   = {United States},
  year      = {2022},
  pages     = {3266--3270},
  % Bare DOI (no https://doi.org/ resolver prefix), per convention.
  doi       = {10.1109/ICIP46576.2022.9897996},
  keywords  = {Domain adaptation, Occlusion invariant, Re-identification, Siamese network},
  language  = {English},
  % Typos fixed in abstract: "models capacity" -> "model's capacity",
  % "data argumentation" -> "data augmentation".
  abstract  = {Entity re-identification is the foundation of tracking- and matching-based computer vision tasks, which are widely employed in a variety of applications. However, when trained exclusively on clear images, the model's capacity to generalize is significantly affected by the presence of occlusion at referencing time, whereas data augmentation-based approaches are costly to construct without guaranteeing a test-time improvement. To tackle this problem, we propose a domain adaptation framework based on learning representations that generates occlusion-invariant feature representations by aligning the clean image embedding distribution with the occluded one, using a disparity discrepancy metric derived from the siamese network architecture. Without the need for additional processing modules during the inference stage or an expensive occlusion-augmentation-enlarged dataset during the training stage, we could obtain occlusion invariant embeddings that are free of the impact of occluders. Extensive experimental results for two tasks across three datasets indicate the proposed method's robustness and effectiveness to a variety of occlusions at all levels.},
  note      = {Publisher Copyright: {\textcopyright} 2022 IEEE.; 29th IEEE International Conference on Image Processing, ICIP 2022 ; Conference date: 16-10-2022 Through 19-10-2022},
}