@inproceedings{Curtsinger:2013:SSS:2451116.2451141,
  author    = {Curtsinger, Charlie and Berger, Emery D.},
  title     = {{STABILIZER}: Statistically Sound Performance Evaluation},
  booktitle = {Proceedings of the Eighteenth International Conference on Architectural Support for Programming Languages and Operating Systems},
  series    = {ASPLOS '13},
  year      = {2013},
  month     = mar,
  pages     = {219--228},
  publisher = {ACM},
  address   = {New York, NY, USA},
  isbn      = {978-1-4503-1870-9},
  doi       = {10.1145/2451116.2451141},
  url       = {https://doi.org/10.1145/2451116.2451141},
  keywords  = {measurement bias, performance evaluation, randomization},
  abstract  = {Researchers and software developers require effective performance evaluation. Researchers must evaluate optimizations or measure overhead. Software developers use automatic performance regression tests to discover when changes improve or degrade performance. The standard methodology is to compare execution times before and after applying changes. Unfortunately, modern architectural features make this approach unsound. Statistically sound evaluation requires multiple samples to test whether one can or cannot (with high confidence) reject the null hypothesis that results are the same before and after. However, caches and branch predictors make performance dependent on machine-specific parameters and the exact layout of code, stack frames, and heap objects. A single binary constitutes just one sample from the space of program layouts, regardless of the number of runs. Since compiler optimizations and code changes also alter layout, it is currently impossible to distinguish the impact of an optimization from that of its layout effects. This paper presents Stabilizer, a system that enables the use of the powerful statistical techniques required for sound performance evaluation on modern architectures. Stabilizer forces executions to sample the space of memory configurations by repeatedly re-randomizing layouts of code, stack, and heap objects at runtime. Stabilizer thus makes it possible to control for layout effects. Re-randomization also ensures that layout effects follow a Gaussian distribution, enabling the use of statistical tests like ANOVA. We demonstrate Stabilizer's efficiency ($<$7\% median overhead) and its effectiveness by evaluating the impact of LLVM's optimizations on the SPEC CPU2006 benchmark suite. We find that, while -O2 has a significant impact relative to -O1, the performance impact of -O3 over -O2 optimizations is indistinguishable from random noise.},
}