@inproceedings{84f727035ca84ea6bf40c6a9d5446019,
title = "Unsupervised video hashing by exploiting spatio-temporal feature",
abstract = "Video hashing is a common solution for content-based video retrieval by encoding high-dimensional feature vectors into short binary codes. Videos not only have spatial structure inside each frame but also have temporal correlation structure between frames, while the latter has been largely neglected by many existing methods. Therefore, in this paper we propose to perform video hashing by incorporating the temporal structure as well as the conventional spatial structure. Specifically, the spatial features of videos are obtained by utilizing Convolutional Neural Network (CNN), and the temporal features are established via Long-Short Term Memory (LSTM). The proposed spatio-temporal feature learning framework can be applied to many existing unsupervised hashing methods such as Iterative Quantization (ITQ), Spectral Hashing (SH), and others. Experimental results on the UCF-101 dataset indicate that by simultaneously employing the temporal features and spatial features, our hashing method is able to significantly improve the performance of existing methods which only deploy the spatial feature.",
keywords = "Spatio-temporal feature, Unsupervised method, Video hashing",
author = "Chao Ma and Yun Gu and Wei Liu and Jie Yang and Xiangjian He",
note = "Publisher Copyright: {\textcopyright} Springer International Publishing AG 2016.",
year = "2016",
doi = "10.1007/978-3-319-46675-0_56",
language = "English",
isbn = "9783319466743",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Verlag",
pages = "511--518",
editor = "Akira Hirose and Minho Lee and Derong Liu and Kenji Doya and Kazushi Ikeda and Seiichi Ozawa",
booktitle = "Neural Information Processing - 23rd International Conference, ICONIP 2016, Proceedings",
address = "Germany",
}
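
The abstract above describes a two-stage pipeline: per-frame spatial features from a CNN are aggregated over time by an LSTM, and the resulting descriptors are binarized with an existing unsupervised hashing method such as ITQ. The following is a minimal, hypothetical Python/NumPy sketch of only the ITQ binarization stage applied to such features; the CNN+LSTM extractor is stubbed out with random data, and all names and parameters (itq_hash, n_bits, n_iters) are illustrative assumptions, not the authors' code.

import numpy as np

def itq_hash(features, n_bits=64, n_iters=50, seed=0):
    """Iterative Quantization (Gong & Lazebnik): learn a rotation of the
    PCA-projected features so that sign() quantization loses little information."""
    rng = np.random.default_rng(seed)
    X = features - features.mean(axis=0)             # zero-center the descriptors
    _, _, Vt = np.linalg.svd(X, full_matrices=False)
    P = Vt[:n_bits].T                                 # PCA projection, shape (d, n_bits)
    V = X @ P                                         # projected data, shape (n, n_bits)
    R, _ = np.linalg.qr(rng.standard_normal((n_bits, n_bits)))  # random orthogonal init
    for _ in range(n_iters):
        B = np.sign(V @ R)                            # fix rotation, update binary codes
        U, _, Wt = np.linalg.svd(V.T @ B)             # fix codes, update rotation (Procrustes)
        R = U @ Wt
    codes = (V @ R) > 0                               # final binary code per video
    return codes, P, R

# Stand-in for the spatio-temporal descriptors the paper derives from a CNN
# (spatial, per frame) followed by an LSTM (temporal, across frames).
video_features = np.random.default_rng(1).standard_normal((1000, 512))
codes, projection, rotation = itq_hash(video_features, n_bits=64)

At query time, a new video's descriptor would be centered, multiplied by the stored projection and rotation, and thresholded the same way, so retrieval reduces to Hamming-distance comparison of short binary codes.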