@inproceedings{humayun_CVPR_2011_occlusions,
  author    = {Humayun, Ahmad and Mac Aodha, Oisin and Brostow, Gabriel J.},
  title     = {Learning to Find Occlusion Regions},
  booktitle = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  year      = {2011},
  month     = jun,
  pages     = {2161--2168},
  publisher = {IEEE},
  doi       = {10.1109/CVPR.2011.5995517},
  issn      = {1063-6919},
  url       = {http://visual.cs.ucl.ac.uk/pubs/learningOcclusion/},
  video     = {http://www.youtube.com/watch?v=BO84SIFpW_I},
  keywords  = {Segmentation; Motion Estimation; Occlusions; Video Analysis},
  abstract  = {For two consecutive frames in a video, we identify which pixels in the first frame become occluded in the second. Such general-purpose detection of occlusion regions is difficult and important because one-to-one correspondence of imaged scene points is needed for many tracking, video segmentation, and reconstruction algorithms. Our hypothesis is that an effective trained occlusion detector can be generated on the basis of i) a broad spectrum of visual features, and ii) representative but synthetic training sequences. By using a Random Forest based framework for feature selection and training, we found that the proposed feature set was sufficient to frequently assign a high probability of occlusion to just the pixels that were indeed becoming occluded. Our extensive experiments on many sequences support this finding, and while accuracy is certainly still scene-dependent, the proposed classifier could be a useful pre-processing step to exploit temporal information in video.},
}