@inproceedings{5fe1c67de45e4d288973dddb82835b04,
title = "Hypergraph partitioning implementation for parallelizing matrix-vector multiplication using CUDA GPU-based parallel computing",
abstract = "Calculation of the matrix-vector multiplication in the real-world problems often involves large matrix with arbitrary size. Therefore, parallelization is needed to speed up the calculation process that usually takes a long time. Graph partitioning techniques that have been discussed in the previous studies cannot be used to complete the parallelized calculation of matrix-vector multiplication with arbitrary size. This is due to the assumption of graph partitioning techniques that can only solve the square and symmetric matrix. Hypergraph partitioning techniques will overcome the shortcomings of the graph partitioning technique. This paper addresses the efficient parallelization of matrix-vector multiplication through hypergraph partitioning techniques using CUDA GPU-based parallel computing. CUDA (compute unified device architecture) is a parallel computing platform and programming model that was created by NVIDIA and implemented by the GPU (graphics processing unit).",
keywords = "CUDA, graph partitioning, hypergraph partitioning, matrix-vector, parallelization",
author = "Murni and Alhadi B. and Ernastuti and T. Handhika and Djati Kirani",
note = "Publisher Copyright: {\textcopyright} 2017 Author(s).; 2nd International Symposium on Current Progress in Mathematics and Sciences 2016, ISCPMS 2016 ; Conference date: 01-11-2016 Through 02-11-2016",
year = "2017",
month = jul,
day = "10",
doi = "10.1063/1.4991257",
language = "English",
series = "AIP Conference Proceedings",
publisher = "American Institute of Physics Inc.",
editor = "Sugeng, {Kiki Ariyanti} and Djoko Triyono and Terry Mart",
booktitle = "International Symposium on Current Progress in Mathematics and Sciences 2016, ISCPMS 2016",
}