@inproceedings{ji2012efficient,
title = "Efficient intranode communication in GPU-accelerated systems",
abstract = "Current implementations of MPI are unaware of accelerator memory (i.e., GPU device memory) and require programmers to explicitly move data between memory spaces. This approach is inefficient, especially for intranode communication, where it can result in several extra copy operations. In this work, we integrate GPU awareness into a popular MPI runtime system and develop techniques that significantly reduce the cost of intranode communication involving one or more GPUs. Experimental results show up to a 2x increase in bandwidth, resulting in an average 4.3% improvement in the total execution time of a halo exchange benchmark.",
keywords = "CUDA, GPU, Intranode communication, MPI, MPICH2, Nemesis",
author = "Ji, Feng and Aji, Ashwin M. and Dinan, James and Buntinas, Darius and Balaji, Pavan and Feng, Wu-chun and Ma, Xiaosong",
year = "2012",
doi = "10.1109/IPDPSW.2012.227",
language = "English",
isbn = "9780769546766",
pages = "1838--1847",
booktitle = "Proceedings of the 2012 IEEE 26th International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2012",
note = "2012 IEEE 26th International Parallel and Distributed Processing Symposium Workshops, IPDPSW 2012; Conference date: 21-05-2012 through 25-05-2012",
}