@inproceedings{69ac0ea61be3416a843b14896e23d82b,
title = "FuseNet: Incorporating depth into semantic segmentation via fusion-based CNN architecture",
abstract = "In this paper we address the problem of semantic labeling of indoor scenes on RGB-D data. With the availability of RGB-D cameras, it is expected that additional depth measurement will improve the accuracy. Here we investigate a solution how to incorporate complementary depth information into a semantic segmentation framework by making use of convolutional neural networks (CNNs). Recently encoder-decoder type fully convolutional CNN architectures have achieved a great success in the field of semantic segmentation. Motivated by this observation we propose an encoder-decoder type network, where the encoder part is composed of two branches of networks that simultaneously extract features from RGB and depth images and fuse depth features into the RGB feature maps as the network goes deeper. Comprehensive experimental evaluations demonstrate that the proposed fusion-based architecture achieves competitive results with the state-of-the-art methods on the challenging SUN RGB-D benchmark obtaining 76.27% global accuracy, 48.30% average class accuracy and 37.29% average intersection-over-union score.",
author = "Caner Hazirbas and Lingni Ma and Csaba Domokos and Daniel Cremers",
note = "Publisher Copyright: {\textcopyright} Springer International Publishing AG 2017.; 13th Asian Conference on Computer Vision, ACCV 2016 ; Conference date: 20-11-2016 Through 24-11-2016",
year = "2017",
doi = "10.1007/978-3-319-54181-5_14",
language = "English",
isbn = "9783319541808",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Verlag",
editor = "Yoichi Sato and Ko Nishino and Vincent Lepetit and Shang-Hong Lai",
booktitle = "Computer Vision - ACCV 2016 - 13th Asian Conference on Computer Vision, Revised Selected Papers",
}
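
The abstract above describes a two-branch encoder in which depth features are fused into the RGB feature maps by element-wise summation as the network deepens. The following is a minimal sketch of that fusion idea, assuming a PyTorch-style API; the module names, layer sizes, and two-stage layout are hypothetical and illustrative, not the authors' implementation.

# Illustrative sketch (not the authors' code): depth features are fused into the
# RGB encoder branch by element-wise summation after each convolution block,
# as described in the abstract. Layer sizes and names are hypothetical.
import torch
import torch.nn as nn

def conv_block(in_ch, out_ch):
    # VGG-style conv + batch norm + ReLU unit used by both encoder branches.
    return nn.Sequential(
        nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(inplace=True),
    )

class FusionEncoderStage(nn.Module):
    """One encoder stage with an RGB branch and a depth branch.

    The depth feature map is added element-wise into the RGB feature map,
    then both branches are downsampled by max pooling before the next stage.
    """

    def __init__(self, rgb_in, depth_in, out_ch):
        super().__init__()
        self.rgb_conv = conv_block(rgb_in, out_ch)
        self.depth_conv = conv_block(depth_in, out_ch)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, rgb, depth):
        rgb = self.rgb_conv(rgb)
        depth = self.depth_conv(depth)
        fused = rgb + depth              # element-wise fusion of depth into RGB
        return self.pool(fused), self.pool(depth)

# Usage: two stages operating on an RGB image and a single-channel depth map.
if __name__ == "__main__":
    rgb = torch.randn(1, 3, 224, 224)
    depth = torch.randn(1, 1, 224, 224)
    stage1 = FusionEncoderStage(3, 1, 64)
    stage2 = FusionEncoderStage(64, 64, 128)
    f1, d1 = stage1(rgb, depth)
    f2, d2 = stage2(f1, d1)
    print(f2.shape)                      # torch.Size([1, 128, 56, 56])

In the full architecture described in the paper, such fused encoder features would feed an upsampling decoder that produces per-pixel class scores; that part is omitted here.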