Keywords (tags) and Publication List
Hayes, Ari B; Li, Lingda; Chavarría-Miranda, Daniel; Song, Shuaiwen Leon; Zhang, Eddy Z Orion: A Framework for GPU Occupancy Tuning Conference Proceedings of the 17th International Middleware Conference (Middleware 2016), Association for Computing Machinery, Trento, Italy, 2016, ISBN: 9781450343008. Abstract | Links | BibTeX | Tags: Concurrent-program compilation, GPU occupancy tuning, Register allocation, Shared memory allocation Hayes, Ari B; Zhang, Eddy Z Unified On-Chip Memory Allocation for SIMT Architecture Conference Proceedings of the 28th ACM International Conference on Supercomputing (ICS 2014), Association for Computing Machinery, Munich, Germany, 2014, ISBN: 9781450326421. Abstract | Links | BibTeX | Tags: Compiler optimization, Concurrency, GPU, Register allocation, Shared memory allocation
@inproceedings{hayes2016orion,
  title     = {Orion: A Framework for {GPU} Occupancy Tuning},
  author    = {Hayes, Ari B. and Li, Lingda and Chavarría-Miranda, Daniel and Song, Shuaiwen Leon and Zhang, Eddy Z.},
  url       = {https://doi.org/10.1145/2988336.2988355},
  doi       = {10.1145/2988336.2988355},
  isbn      = {9781450343008},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {Proceedings of the 17th International Middleware Conference (Middleware 2016)},
  publisher = {Association for Computing Machinery},
  address   = {Trento, Italy},
  abstract  = {An important feature of modern GPU architectures is variable occupancy. Occupancy measures the ratio between the actual number of threads actively running on a GPU and the maximum number of threads that can be scheduled on a GPU. High-occupancy execution enables a large number of threads to run simultaneously and to hide memory latency, but may increase resource contention. Low-occupancy execution leads to less resource contention, but is less capable of hiding memory latency. Occupancy tuning is an important and challenging problem. A program running at two different occupancy levels can have three to four times difference in performance. We introduce Orion, the first GPU program occupancy tuning framework. The Orion framework automatically generates and chooses occupancy-adaptive code for any given GPU program. It is capable of finding the (near-)optimal occupancy level by combining static and dynamic tuning techniques. We demonstrate the efficiency of Orion with twelve representative benchmarks from the Rodinia benchmark suite and CUDA SDK evaluated on two different GPU architectures, obtaining up to 1.61 times speedup, 62.5% memory resource saving, and 6.7% energy saving compared to the baseline of optimized code compiled by nvcc.},
  keywords  = {Concurrent-program compilation, GPU occupancy tuning, Register allocation, Shared memory allocation},
  pubstate  = {published},
  tppubtype = {conference}
}
@inproceedings{hayes2014unified,
  title     = {Unified On-Chip Memory Allocation for {SIMT} Architecture},
  author    = {Hayes, Ari B. and Zhang, Eddy Z.},
  url       = {https://doi.org/10.1145/2597652.2597685},
  doi       = {10.1145/2597652.2597685},
  isbn      = {9781450326421},
  year      = {2014},
  date      = {2014-01-01},
  booktitle = {Proceedings of the 28th {ACM} International Conference on Supercomputing ({ICS} 2014)},
  pages     = {293--302},
  publisher = {Association for Computing Machinery},
  address   = {Munich, Germany},
  abstract  = {The popularity of general purpose Graphic Processing Unit (GPU) is largely attributed to the tremendous concurrency enabled by its underlying architecture — single instruction multiple thread (SIMT) architecture. It keeps the context of a significant number of threads in registers to enable fast “context switches” when the processor is stalled due to execution dependence, memory requests and etc. The SIMT architecture has a large register file evenly partitioned among all concurrent threads. Per-thread register usage determines the number of concurrent threads, which strongly affects the whole program performance. Existing register allocation techniques, extensively studied in the past several decades, are oblivious to the register contention due to the concurrent execution of many threads. They are prone to making optimization decisions that benefit single thread but degrade the whole application performance. Is it possible for compilers to make register allocation decisions that can maximize the whole GPU application performance? We tackle this important question from two different aspects in this paper. We first propose an unified on-chip memory allocation framework that uses scratch-pad memory to help: (1) alleviate single-thread register pressure; (2) increase whole application throughput. Secondly, we propose a characterization model for the SIMT execution model in order to achieve a desired on-chip memory partition given the register pressure of a program. Overall, we discovered that it is possible to automatically determine an on-chip memory resource allocation that maximizes concurrency while ensuring good single-thread performance at compile-time. We evaluated our techniques on a representative set of GPU benchmarks with non-trivial register pressure. We are able to achieve up to 1.70 times speedup over the baseline of the traditional register allocation scheme that maximizes single thread performance.},
  keywords  = {Compiler optimization, Concurrency, GPU, Register allocation, Shared memory allocation},
  pubstate  = {published},
  tppubtype = {conference}
}