@conference{Gil:2011:MCS:2095050.2095100,
  title     = {A microbenchmark case study and lessons learned},
  booktitle = {Proceedings of the Compilation of the Co-located Workshops on DSM'11, TMC'11, AGERE!'11, AOOPES'11, NEAT'11, \& VMIL'11},
  series    = {SPLASH '11 Workshops},
  year      = {2011},
  pages     = {297--308},
  publisher = {ACM},
  address   = {New York, NY, USA},
  abstract  = {The extra abstraction layer posed by the virtual machine, the JIT compilation cycles, and the asynchronous garbage collection are the main reasons that benchmarking Java code is a delicate task. The primary weapon in battling these is replication: "billions and billions of runs" is a phrase sometimes used by practitioners. This paper describes a case study, which consumed hundreds of hours of CPU time, and tries to characterize the inconsistencies in the results we encountered.},
  keywords  = {benchmark, measurements, steady-state},
  isbn      = {978-1-4503-1183-0},
  doi       = {10.1145/2095050.2095100},
  url       = {http://doi.acm.org/10.1145/2095050.2095100},
  author    = {Gil, Joseph Yossi and Lenz, Keren and Shimron, Yuval}
}