Video
BibTeX
@inproceedings{Xu:2021:10.20380/GI2021.23,
  author    = {Xu, Meng and Shen, Chen and Zhang, Jun and Wang, Zhipeng and Ruan, Zhiwei and Poslad, Stefan and Xu, Pengfei},
  title     = {A Stricter Constraint Produces Outstanding Matching: Learning More Reliable Image Matching Using a Quadratic Hinge Triplet Loss Network},
  booktitle = {Proceedings of Graphics Interface 2021},
  series    = {GI 2021},
  year      = {2021},
  issn      = {0713-5424},
  isbn      = {978-0-9947868-6-9},
  location  = {Virtual Event},
  pages     = {203--209},
  numpages  = {7},
  doi       = {10.20380/GI2021.23},
  publisher = {Canadian Information Processing Society},
}
Abstract
Image matching is widely used in many applications, such as visual-based localization and 3D reconstruction. Compared with traditional local features (e.g., SIFT) and outlier elimination methods (e.g., RANSAC), learning-based image matching methods (e.g., HardNet and OANet) show a promising performance under challenging environments and large-scale benchmarks. However, the existing learning-based methods suffer from noise in the training data and the existing loss function, e.g., hinge loss, does not work well in image matching networks. In this paper, we propose an end-to-end image matching method that uses less training data to obtain a more accurate and robust performance. First, a novel data cleaning strategy is proposed to remove the noise in the training dataset. Second, we strengthen the matching constraints by proposing a novel quadratic hinge triplet (QHT) loss function to improve the network. Finally, we apply a stricter OANet for sample judgement to produce more outstanding matching. The proposed method shows the state-of-the-art performance when applied to the large-scale and challenging Phototourism dataset that also reported the 1st place in the CVPR 2020 Image Matching Challenges Workshop Track1 (unlimited keypoints and standard descriptors) using the reconstructed pose accuracy metric.