@inproceedings{ofli2020multimodal,
title = "Analysis of social media data using multimodal deep learning for disaster response",
abstract = "Multimedia content in social media platforms provides significant information during disaster events. The types of information shared include reports of injured or deceased people, infrastructure damage, and missing or found people, among others. Although many studies have shown the usefulness of both text and image content for disaster response purposes, the research has been mostly focused on analyzing only the text modality in the past. In this paper, we propose to use both text and image modalities of social media data to learn a joint representation using state-of-the-art deep learning techniques. Specifically, we utilize convolutional neural networks to define a multimodal deep learning architecture with a modality-agnostic shared representation. Extensive experiments on real-world disaster datasets show that the proposed multimodal architecture yields better performance than models trained using a single modality (e.g., either text or image).",
keywords = "Crisis Computing, Multimedia Content, Multimodal Deep Learning, Natural Disasters, Social Media",
author = "Ferda Ofli and Firoj Alam and Muhammad Imran",
note = "Publisher Copyright: {\textcopyright} 2020 Information Systems for Crisis Response and Management, ISCRAM. All rights reserved.; 17th Annual International Conference on Information Systems for Crisis Response and Management, ISCRAM 2020 ; Conference date: 23-05-2021",
year = "2020",
language = "English",
series = "Proceedings of the International ISCRAM Conference",
publisher = "Information Systems for Crisis Response and Management, ISCRAM",
pages = "802--811",
editor = "Hughes, {Amanda Lee} and Fiona McNeill and Zobel, {Christopher W.}",
booktitle = "ISCRAM 2020 - Proceedings",
address = "Spain",
}
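
% The abstract above describes a feature-fusion pattern: one convolutional branch
% per modality, concatenated into a modality-agnostic shared representation that
% feeds a joint classifier. Below is a minimal sketch of that general idea. It is
% not the authors' implementation: PyTorch, the tiny image backbone (a stand-in
% for a pretrained CNN such as VGG16), and all layer sizes, kernel sizes, and the
% class count are illustrative assumptions.

import torch
import torch.nn as nn


class TextCNN(nn.Module):
    """1-D convolutions over word embeddings; hyperparameters are illustrative."""
    def __init__(self, vocab_size=20000, emb_dim=300, n_filters=100,
                 kernel_sizes=(2, 3, 4)):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.convs = nn.ModuleList(
            [nn.Conv1d(emb_dim, n_filters, k) for k in kernel_sizes])
        self.out_dim = n_filters * len(kernel_sizes)

    def forward(self, token_ids):                      # (batch, seq_len)
        x = self.embedding(token_ids).transpose(1, 2)  # (batch, emb_dim, seq_len)
        # Max-pool each convolution over time, then concatenate the filter maps.
        pooled = [torch.relu(conv(x)).max(dim=2).values for conv in self.convs]
        return torch.cat(pooled, dim=1)                # (batch, out_dim)


class MultimodalNet(nn.Module):
    """Feature fusion: image CNN + text CNN -> shared dense layers -> classes."""
    def __init__(self, num_classes, img_feat_dim=512, shared_dim=256):
        super().__init__()
        # Small stand-in image branch; in practice this would be a pretrained CNN.
        self.image_branch = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(64, img_feat_dim), nn.ReLU())
        self.text_branch = TextCNN()
        # Shared layers over the concatenated features form the joint,
        # modality-agnostic representation the abstract refers to.
        self.shared = nn.Sequential(
            nn.Linear(img_feat_dim + self.text_branch.out_dim, shared_dim),
            nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(shared_dim, num_classes))

    def forward(self, images, token_ids):
        fused = torch.cat([self.image_branch(images),
                           self.text_branch(token_ids)], dim=1)
        return self.shared(fused)


if __name__ == "__main__":
    # Toy forward pass: 2 image-text pairs, 5 hypothetical disaster-info classes.
    model = MultimodalNet(num_classes=5)
    logits = model(torch.randn(2, 3, 224, 224),
                   torch.randint(0, 20000, (2, 30)))
    print(logits.shape)  # torch.Size([2, 5])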