30#ifndef ANKERL_NANOBENCH_H_INCLUDED
31#define ANKERL_NANOBENCH_H_INCLUDED
34#define ANKERL_NANOBENCH_VERSION_MAJOR 4
35#define ANKERL_NANOBENCH_VERSION_MINOR 3
36#define ANKERL_NANOBENCH_VERSION_PATCH 11
46#include <unordered_map>
49#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
51#define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus
52#define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L
53#define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L
54#define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L
55#define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L
57#if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17)
58# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]]
60# define ANKERL_NANOBENCH_PRIVATE_NODISCARD()
64# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \
65 _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"")
66# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop")
68# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH()
69# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP()
73# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"")
74# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop")
76# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH()
77# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP()
80#if defined(ANKERL_NANOBENCH_LOG_ENABLED)
82# define ANKERL_NANOBENCH_LOG(x) \
84 std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl; \
87# define ANKERL_NANOBENCH_LOG(x) \
92#define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
93#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
94# include <linux/version.h>
95# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
98# undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS
99# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1
103#if defined(__clang__)
104# define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__)))
106# define ANKERL_NANOBENCH_NO_SANITIZE(...)
110# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline)
112# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline))
117#if defined(__GNUC__) && __GNUC__ < 5
118# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
120# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
125#define ANKERL_NANOBENCH_PRIVATE_NOEXCEPT_STRING_MOVE() std::is_nothrow_move_assignable<std::string>::value
132using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock,
133 std::chrono::steady_clock>::type;
141template <
typename SetupOp>
296void render(std::string
const& mustacheTemplate,
Bench const& bench, std::ostream&
out);
306void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out);
307void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out);
321char const*
csv() noexcept;
362class PerformanceCounters;
364#if ANKERL_NANOBENCH(PERF_COUNTERS)
365class LinuxPerformanceCounters;
393 std::string mBenchmarkTitle =
"benchmark";
394 std::string mBenchmarkName =
"noname";
395 std::string mUnit =
"op";
397 double mComplexityN = -1.0;
398 size_t mNumEpochs = 11;
399 size_t mClockResolutionMultiple =
static_cast<size_t>(1000);
400 std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
401 std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1);
402 uint64_t mMinEpochIterations{1};
404 uint64_t mEpochIterations{0};
405 uint64_t mWarmup = 0;
406 std::ostream* mOut =
nullptr;
407 std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1};
408 std::string mTimeUnitName =
"ns";
409 bool mShowPerformanceCounters =
true;
410 bool mIsRelative =
false;
411 std::unordered_map<std::string, std::string> mContext{};
472 std::vector<std::vector<double>> mNameToMeasurements{};
500 static constexpr uint64_t(min)();
501 static constexpr uint64_t(max)();
517 Rng& operator=(
Rng&&) noexcept = default;
518 ~
Rng() noexcept = default;
545 explicit
Rng(uint64_t seed) noexcept;
546 Rng(uint64_t x, uint64_t y) noexcept;
561 inline uint64_t operator()() noexcept;
579 inline uint32_t bounded(uint32_t range) noexcept;
590 inline
double uniform01() noexcept;
599 template <typename Container>
600 void shuffle(Container& container) noexcept;
611 static constexpr uint64_t
rotl(uint64_t x,
unsigned k) noexcept;
663 template <typename Op>
665 Bench& run(
char const* benchmarkName, Op&& op);
667 template <typename Op>
669 Bench& run(
std::
string const& benchmarkName, Op&& op);
675 template <typename Op>
684 Bench& title(
char const* benchmarkTitle);
709 Bench& context(
char const* variableName,
char const* variableValue);
710 Bench& context(
std::
string const& variableName,
std::
string const& variableValue);
731 template <typename T>
732 Bench& batch(T b) noexcept;
756 Bench& timeUnit(
std::chrono::duration<
double> const& tu,
std::
string const& tuName);
758 ANKERL_NANOBENCH(NODISCARD)
std::chrono::duration<
double> const& timeUnit() const noexcept;
790 Bench& clockResolutionMultiple(
size_t multiple) noexcept;
808 Bench& epochs(
size_t numEpochs) noexcept;
821 Bench& maxEpochTime(
std::chrono::nanoseconds t) noexcept;
834 Bench& minEpochTime(
std::chrono::nanoseconds t) noexcept;
847 Bench& minEpochIterations(uint64_t numIters) noexcept;
856 Bench& epochIterations(uint64_t numIters) noexcept;
868 Bench& warmup(uint64_t numWarmupIters) noexcept;
888 Bench& relative(
bool isRelativeEnabled) noexcept;
919 template <typename Arg>
936 template <typename T>
937 Bench& complexityN(T n) noexcept;
996 template <typename Op>
997 BigO complexityBigO(
char const*
name, Op op) const;
999 template <typename Op>
1000 BigO complexityBigO(
std::
string const&
name, Op op) const;
1020 template <typename SetupOp>
1021 detail::SetupRunner<SetupOp>
setup(SetupOp setupOp);
1024 template <typename SetupOp, typename Op>
1025 Bench& runImpl(SetupOp& setupOp, Op&& op);
1027 template <typename SetupOp>
1031 std::vector<Result> mResults{};
1041template <
typename Arg>
1046#if defined(_MSC_VER)
1047void doNotOptimizeAwaySink(
void const*);
1049template <
typename T>
1057template <
typename T>
1060 asm volatile(
"" : :
"r,m"(val) :
"memory");
1063template <
typename T>
1065# if defined(__clang__)
1067 asm volatile(
"" :
"+r,m"(val) : :
"memory");
1070 asm volatile(
"" :
"+m,r"(val) : :
"memory");
1117#if ANKERL_NANOBENCH(PERF_COUNTERS)
1118 LinuxPerformanceCounters* mPc =
nullptr;
1134 template <
typename Op>
1136 for (
auto& rangeMeasure :
data) {
1137 rangeMeasure.first = op(rangeMeasure.first);
1144 template <
typename Op>
1146 :
BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
1148 template <
typename Op>
1150 :
BigO(
std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {}
1162 double mNormalizedRootMeanSquare{};
1165std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs);
1173namespace nanobench {
1180 return (std::numeric_limits<uint64_t>::max)();
1184uint64_t
Rng::operator()() noexcept {
1187 mX = UINT64_C(15241094284759029579) * mY;
1188 mY =
rotl(mY - x, 27);
1194uint32_t
Rng::bounded(uint32_t range) noexcept {
1195 uint64_t
const r32 =
static_cast<uint32_t
>(operator()());
1196 auto multiresult = r32 * range;
1197 return static_cast<uint32_t
>(multiresult >> 32U);
1201 auto i = (UINT64_C(0x3ff) << 52U) | (
operator()() >> 12U);
1205 std::memcpy(&d, &i,
sizeof(
double));
1209template <
typename Container>
1211 auto i = container.size();
1214 auto n = operator()();
1216 auto b1 =
static_cast<decltype(i)
>((
static_cast<uint32_t
>(n) *
static_cast<uint64_t
>(i)) >> 32U);
1217 swap(container[--i], container[b1]);
1219 auto b2 =
static_cast<decltype(i)
>(((n >> 32U) *
static_cast<uint64_t
>(i)) >> 32U);
1220 swap(container[--i], container[b2]);
1225constexpr uint64_t
Rng::
rotl(uint64_t x,
unsigned k) noexcept {
1226 return (x <<
k) | (x >> (64U -
k));
1231template <
typename SetupOp>
1235 : mSetupOp(
std::move(setupOp))
1238 template <
typename Op>
1241 return mBench.
runImpl(mSetupOp, std::forward<Op>(op));
1250template <
typename Op>
1253 auto setupOp = [] {};
1254 return runImpl(setupOp, std::forward<Op>(op));
1257template <
typename SetupOp,
typename Op>
1264 while (
auto n = iterationLogic.numIters()) {
1268 Clock::time_point
const before = Clock::now();
1272 Clock::time_point
const after = Clock::now();
1274 pc.updateResults(iterationLogic.numIters());
1275 iterationLogic.
add(after - before, pc);
1281template <
typename SetupOp>
1287template <
typename Op>
1289 name(benchmarkName);
1290 return run(std::forward<Op>(op));
1293template <
typename Op>
1295 name(benchmarkName);
1296 return run(std::forward<Op>(op));
1299template <
typename Op>
1304template <
typename Op>
1311template <
typename T>
1313 mConfig.mBatch =
static_cast<double>(b);
1318template <
typename T>
1320 mConfig.mComplexityN =
static_cast<double>(n);
1325template <
typename Arg>
1332template <
typename Arg>
1339#if defined(_MSC_VER)
1340template <
typename T>
1342 doNotOptimizeAwaySink(&val);
1351#if defined(ANKERL_NANOBENCH_IMPLEMENT)
1357# include <algorithm>
1367# include <stdexcept>
1369# if defined(__linux__)
1372# if ANKERL_NANOBENCH(PERF_COUNTERS)
1375# include <linux/perf_event.h>
1376# include <sys/ioctl.h>
1377# include <sys/syscall.h>
1383namespace nanobench {
1394class StreamStateRestorer;
1396class MarkDownColumn;
1407namespace nanobench {
1409uint64_t splitMix64(uint64_t& state)
noexcept;
1414template <
typename T>
1415inline double d(T t)
noexcept {
1416 return static_cast<double>(
t);
1418inline double d(Clock::duration duration)
noexcept {
1419 return std::chrono::duration_cast<std::chrono::duration<double>>(duration).
count();
1423inline Clock::duration clockResolution() noexcept;
1427namespace templates {
1429char const*
csv() noexcept {
1430 return R
"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total"
1431{{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}}
1436 return R
"DELIM(<html>
1439 <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
1443 <div id="myDiv"></div>
1448 y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/last}}{{/measurement}}],
1452 var title = '{{title}}';
1454 data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' }));
1455 var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true});
1462char const*
pyperf() noexcept {
1469{{#measurement}} {{elapsed}}{{^-last}},
1470{{/last}}{{/measurement}}
1477 "loops": {{sum(iterations)}},
1478 "inner_loops": {{batch}},
1479 "name": "{{title}}",
1486char const*
json() noexcept {
1490 "title": "{{title}}",
1494 "complexityN": {{complexityN}},
1495 "epochs": {{epochs}},
1496 "clockResolution": {{clockResolution}},
1497 "clockResolutionMultiple": {{clockResolutionMultiple}},
1498 "maxEpochTime": {{maxEpochTime}},
1499 "minEpochTime": {{minEpochTime}},
1500 "minEpochIterations": {{minEpochIterations}},
1501 "epochIterations": {{epochIterations}},
1502 "warmup": {{warmup}},
1503 "relative": {{relative}},
1504 "median(elapsed)": {{median(elapsed)}},
1505 "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}},
1506 "median(instructions)": {{median(instructions)}},
1507 "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}},
1508 "median(cpucycles)": {{median(cpucycles)}},
1509 "median(contextswitches)": {{median(contextswitches)}},
1510 "median(pagefaults)": {{median(pagefaults)}},
1511 "median(branchinstructions)": {{median(branchinstructions)}},
1512 "median(branchmisses)": {{median(branchmisses)}},
1513 "totalTime": {{sumProduct(iterations, elapsed)}},
1516 "iterations": {{iterations}},
1517 "elapsed": {{elapsed}},
1518 "pagefaults": {{pagefaults}},
1519 "cpucycles": {{cpucycles}},
1520 "contextswitches": {{contextswitches}},
1521 "instructions": {{instructions}},
1522 "branchinstructions": {{branchinstructions}},
1523 "branchmisses": {{branchmisses}}
1524 }{{^-last}},{{/-last}}
1526 }{{^-last}},{{/-last}}
1533 enum class Type { tag, content, section, inverted_section };
1537 std::vector<Node> children;
1542 bool operator==(
char const (&str)[N])
const noexcept {
1544 return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
1550static std::vector<Node> parseMustacheTemplate(
char const** tpl) {
1551 std::vector<Node> nodes;
1554 auto const* begin = std::strstr(*tpl,
"{{");
1555 auto const* end = begin;
1556 if (begin !=
nullptr) {
1559 end = std::strstr(begin,
"}}");
1562 if (begin ==
nullptr || end ==
nullptr) {
1565 nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
1570 nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
1582 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
1587 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
1591 nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag});
1597static bool generateFirstLast(Node
const& n,
size_t idx,
size_t size, std::ostream&
out) {
1599 bool const matchFirst = n ==
"-first";
1600 bool const matchLast = n ==
"-last";
1601 if (!matchFirst && !matchLast) {
1605 bool doWrite =
false;
1606 if (n.type == Node::Type::section) {
1607 doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1);
1608 }
else if (n.type == Node::Type::inverted_section) {
1609 doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1);
1613 for (
auto const& child : n.children) {
1614 if (child.type == Node::Type::content) {
1615 out.write(child.begin, std::distance(child.begin, child.end));
1622static bool matchCmdArgs(std::string
const& str, std::vector<std::string>& matchResult) {
1623 matchResult.clear();
1624 auto idxOpen = str.find(
'(');
1625 auto idxClose = str.find(
')', idxOpen);
1626 if (idxClose == std::string::npos) {
1630 matchResult.emplace_back(str.substr(0, idxOpen));
1633 matchResult.emplace_back();
1634 for (
size_t i = idxOpen + 1; i != idxClose; ++i) {
1635 if (str[i] ==
' ' || str[i] ==
'\t') {
1639 if (str[i] ==
',') {
1641 matchResult.emplace_back();
1645 matchResult.back() += str[i];
1650static bool generateConfigTag(Node
const& n, Config
const& config, std::ostream&
out) {
1654 out << config.mBenchmarkTitle;
1658 out << config.mBenchmarkName;
1662 out << config.mUnit;
1666 out << config.mBatch;
1669 if (n ==
"complexityN") {
1670 out << config.mComplexityN;
1673 if (n ==
"epochs") {
1674 out << config.mNumEpochs;
1677 if (n ==
"clockResolution") {
1678 out << d(detail::clockResolution());
1681 if (n ==
"clockResolutionMultiple") {
1682 out << config.mClockResolutionMultiple;
1685 if (n ==
"maxEpochTime") {
1686 out << d(config.mMaxEpochTime);
1689 if (n ==
"minEpochTime") {
1690 out << d(config.mMinEpochTime);
1693 if (n ==
"minEpochIterations") {
1694 out << config.mMinEpochIterations;
1697 if (n ==
"epochIterations") {
1698 out << config.mEpochIterations;
1701 if (n ==
"warmup") {
1702 out << config.mWarmup;
1705 if (n ==
"relative") {
1706 out << config.mIsRelative;
1713static std::ostream& generateResultTag(Node
const& n,
Result const& r, std::ostream&
out) {
1714 if (generateConfigTag(n, r.config(),
out)) {
1722 std::vector<std::string> matchResult;
1723 if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
1724 if (matchResult.size() == 2) {
1725 if (matchResult[0] ==
"context") {
1726 return out << r.context(matchResult[1]);
1734 if (matchResult[0] ==
"median") {
1735 return out << r.median(m);
1737 if (matchResult[0] ==
"average") {
1738 return out << r.average(m);
1740 if (matchResult[0] ==
"medianAbsolutePercentError") {
1741 return out << r.medianAbsolutePercentError(m);
1743 if (matchResult[0] ==
"sum") {
1744 return out << r.sum(m);
1746 if (matchResult[0] ==
"minimum") {
1747 return out << r.minimum(m);
1749 if (matchResult[0] ==
"maximum") {
1750 return out << r.maximum(m);
1752 }
else if (matchResult.size() == 3) {
1759 if (matchResult[0] ==
"sumProduct") {
1760 return out << r.sumProduct(m1, m2);
1769 throw std::runtime_error(
"command '" + std::string(n.begin, n.end) +
"' not understood");
1772static void generateResultMeasurement(std::vector<Node>
const& nodes,
size_t idx,
Result const& r, std::ostream&
out) {
1773 for (
auto const& n : nodes) {
1774 if (!generateFirstLast(n, idx, r.size(),
out)) {
1777 case Node::Type::content:
1778 out.write(n.begin, std::distance(n.begin, n.end));
1781 case Node::Type::inverted_section:
1782 throw std::runtime_error(
"got a inverted section inside measurement");
1784 case Node::Type::section:
1785 throw std::runtime_error(
"got a section inside measurement");
1787 case Node::Type::tag: {
1792 out << r.get(idx, m);
1801static void generateResult(std::vector<Node>
const& nodes,
size_t idx, std::vector<Result>
const& results, std::ostream&
out) {
1802 auto const& r = results[idx];
1803 for (
auto const& n : nodes) {
1804 if (!generateFirstLast(n, idx, results.size(),
out)) {
1807 case Node::Type::content:
1808 out.write(n.begin, std::distance(n.begin, n.end));
1811 case Node::Type::inverted_section:
1812 throw std::runtime_error(
"got a inverted section inside result");
1814 case Node::Type::section:
1815 if (n ==
"measurement") {
1816 for (
size_t i = 0; i < r.size(); ++i) {
1817 generateResultMeasurement(n.children, i, r,
out);
1820 throw std::runtime_error(
"got a section inside result");
1824 case Node::Type::tag:
1825 generateResultTag(n, r,
out);
1837char const* getEnv(
char const*
name);
1838bool isEndlessRunning(std::string
const&
name);
1839bool isWarningsEnabled();
1841template <
typename T>
1842T parseFile(std::string
const& filename,
bool* fail);
1844void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
1845void printStabilityInformationOnce(std::ostream* outStream);
1848uint64_t& singletonHeaderHash() noexcept;
1851Clock::duration calcClockResolution(
size_t numEvaluations) noexcept;
1858class NumSep :
public std::numpunct<char> {
1860 explicit NumSep(
char sep);
1861 char do_thousands_sep()
const override;
1862 std::string do_grouping()
const override;
1871class StreamStateRestorer {
1873 explicit StreamStateRestorer(std::ostream&
s);
1874 ~StreamStateRestorer();
1880 StreamStateRestorer(StreamStateRestorer
const&) =
delete;
1881 StreamStateRestorer& operator=(StreamStateRestorer
const&) =
delete;
1882 StreamStateRestorer(StreamStateRestorer&&) =
delete;
1883 StreamStateRestorer& operator=(StreamStateRestorer&&) =
delete;
1886 std::ostream& mStream;
1887 std::locale mLocale;
1888 std::streamsize
const mPrecision;
1889 std::streamsize
const mWidth;
1890 std::ostream::char_type
const mFill;
1891 std::ostream::fmtflags
const mFmtFlags;
1898 Number(
int width,
int precision,
double value);
1899 Number(
int width,
int precision, int64_t value);
1903 friend std::ostream&
operator<<(std::ostream& os, Number
const& n);
1904 std::ostream& write(std::ostream& os)
const;
1912std::string to_s(uint64_t n);
1914std::ostream&
operator<<(std::ostream& os, Number
const& n);
1916class MarkDownColumn {
1918 MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val)
noexcept;
1928 std::string mSuffix;
1935 explicit MarkDownCode(std::string
const& what);
1938 friend std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1939 std::ostream& write(std::ostream& os)
const;
1941 std::string mWhat{};
1944std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1954namespace nanobench {
1957void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out) {
1958 detail::fmt::StreamStateRestorer
const restorer(
out);
1960 out.precision(std::numeric_limits<double>::digits10);
1961 auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
1963 for (
auto const& n : nodes) {
1966 case templates::Node::Type::content:
1967 out.write(n.begin, std::distance(n.begin, n.end));
1970 case templates::Node::Type::inverted_section:
1971 throw std::runtime_error(
"unknown list '" + std::string(n.begin, n.end) +
"'");
1973 case templates::Node::Type::section:
1974 if (n ==
"result") {
1975 const size_t nbResults = results.size();
1976 for (
size_t i = 0; i < nbResults; ++i) {
1977 generateResult(n.children, i, results,
out);
1979 }
else if (n ==
"measurement") {
1980 if (results.size() != 1) {
1981 throw std::runtime_error(
1982 "render: can only use section 'measurement' here if there is a single result, but there are " +
1983 detail::fmt::to_s(results.size()));
1986 auto const& r = results.front();
1987 for (
size_t i = 0; i < r.size(); ++i) {
1988 generateResultMeasurement(n.children, i, r,
out);
1991 throw std::runtime_error(
"render: unknown section '" + std::string(n.begin, n.end) +
"'");
1995 case templates::Node::Type::tag:
1996 if (results.size() == 1) {
1998 generateResultTag(n, results.front(),
out);
2001 if (!generateConfigTag(n, results.back().config(),
out)) {
2002 throw std::runtime_error(
"unknown tag '" + std::string(n.begin, n.end) +
"'");
2010void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out) {
2011 render(mustacheTemplate.c_str(), results,
out);
2014void render(
char const* mustacheTemplate,
const Bench& bench, std::ostream&
out) {
2015 render(mustacheTemplate, bench.results(),
out);
2018void render(std::string
const& mustacheTemplate,
const Bench& bench, std::ostream&
out) {
2019 render(mustacheTemplate.c_str(), bench.results(),
out);
2025# if defined(__clang__)
2026# pragma clang diagnostic push
2027# pragma clang diagnostic ignored "-Wexit-time-destructors"
2029 static PerformanceCounters pc;
2030# if defined(__clang__)
2031# pragma clang diagnostic pop
2040# if defined(_MSC_VER)
2041# pragma optimize("", off)
// MSVC-only sink for doNotOptimizeAway (see the template overload that calls
// doNotOptimizeAwaySink(&val) under #if defined(_MSC_VER)): taking the value's
// address and passing it to this deliberately empty, optimization-disabled
// function (surrounded by `#pragma optimize("", off)` / `("", on)`) forces the
// compiler to materialize the value instead of eliminating it as dead code.
2042void doNotOptimizeAwaySink(
void const*) {}
2043# pragma optimize("", on)
2046template <
typename T>
2047T parseFile(std::string
const& filename,
bool* fail) {
2048 std::ifstream fin(filename);
2051 if (fail !=
nullptr) {
2057char const* getEnv(
char const*
name) {
2058# if defined(_MSC_VER)
2059# pragma warning(push)
2060# pragma warning(disable : 4996)
2062 return std::getenv(
name);
2063# if defined(_MSC_VER)
2064# pragma warning(pop)
2068bool isEndlessRunning(std::string
const&
name) {
2069 auto const*
const endless = getEnv(
"NANOBENCH_ENDLESS");
2070 return nullptr != endless && endless ==
name;
2074bool isWarningsEnabled() {
2075 auto const*
const suppression = getEnv(
"NANOBENCH_SUPPRESS_WARNINGS");
2076 return nullptr == suppression || suppression == std::string(
"0");
2079void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) {
2081 recommendations.clear();
2084 warnings.emplace_back(
"DEBUG defined");
2085 bool const recommendCheckFlags =
true;
2087 bool const recommendCheckFlags =
false;
2090 bool recommendPyPerf =
false;
2091# if defined(__linux__)
2092 auto nprocs = sysconf(_SC_NPROCESSORS_CONF);
2094 warnings.emplace_back(
"couldn't figure out number of processors - no governor, turbo check possible");
2097 for (
long id = 0;
id < nprocs; ++id) {
2098 auto idStr = detail::fmt::to_s(
static_cast<uint64_t
>(
id));
2099 auto sysCpu =
"/sys/devices/system/cpu/cpu" + idStr;
2100 auto minFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_min_freq",
nullptr);
2101 auto maxFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_max_freq",
nullptr);
2102 if (minFreq != maxFreq) {
2103 auto minMHz = d(minFreq) / 1000.0;
2104 auto maxMHz = d(maxFreq) / 1000.0;
2105 warnings.emplace_back(
"CPU frequency scaling enabled: CPU " + idStr +
" between " +
2106 detail::fmt::Number(1, 1, minMHz).to_s() +
" and " + detail::fmt::Number(1, 1, maxMHz).to_s() +
2108 recommendPyPerf =
true;
2114 auto currentGovernor = parseFile<std::string>(
"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", &fail);
2115 if (!fail &&
"performance" != currentGovernor) {
2116 warnings.emplace_back(
"CPU governor is '" + currentGovernor +
"' but should be 'performance'");
2117 recommendPyPerf =
true;
2120 auto noTurbo = parseFile<int>(
"/sys/devices/system/cpu/intel_pstate/no_turbo", &fail);
2121 if (!fail && noTurbo == 0) {
2122 warnings.emplace_back(
"Turbo is enabled, CPU frequency will fluctuate");
2123 recommendPyPerf =
true;
2128 if (recommendCheckFlags) {
2129 recommendations.emplace_back(
"Make sure you compile for Release");
2131 if (recommendPyPerf) {
2132 recommendations.emplace_back(
"Use 'pyperf system tune' before benchmarking. See https://github.com/psf/pyperf");
2136void printStabilityInformationOnce(std::ostream* outStream) {
2137 static bool shouldPrint =
true;
2138 if (shouldPrint && (
nullptr != outStream) && isWarningsEnabled()) {
2139 auto& os = *outStream;
2140 shouldPrint =
false;
2141 std::vector<std::string> warnings;
2142 std::vector<std::string> recommendations;
2143 gatherStabilityInformation(warnings, recommendations);
2144 if (warnings.empty()) {
2148 os <<
"Warning, results might be unstable:" << std::endl;
2149 for (
auto const& w : warnings) {
2150 os <<
"* " << w << std::endl;
2153 os << std::endl <<
"Recommendations" << std::endl;
2154 for (
auto const& r : recommendations) {
2155 os <<
"* " << r << std::endl;
2161uint64_t& singletonHeaderHash() noexcept {
2162 static uint64_t sHeaderHash{};
2167inline uint64_t hash_combine(uint64_t seed, uint64_t val) {
2168 return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U));
2172Clock::duration calcClockResolution(
size_t numEvaluations)
noexcept {
2173 auto bestDuration = Clock::duration::max();
2174 Clock::time_point tBegin;
2175 Clock::time_point tEnd;
2176 for (
size_t i = 0; i < numEvaluations; ++i) {
2177 tBegin = Clock::now();
2179 tEnd = Clock::now();
2180 }
while (tBegin == tEnd);
2181 bestDuration = (std::min)(bestDuration, tEnd - tBegin);
2183 return bestDuration;
2187Clock::duration clockResolution() noexcept {
2188 static Clock::duration
const sResolution = calcClockResolution(20);
2193struct IterationLogic::Impl {
2194 enum class State { warmup, upscaling_runtime, measuring, endless };
2196 explicit Impl(Bench
const& bench)
2198 , mResult(bench.config()) {
2199 printStabilityInformationOnce(mBench.output());
2202 mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple();
2203 if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) {
2204 mTargetRuntimePerEpoch = mBench.maxEpochTime();
2206 if (mTargetRuntimePerEpoch < mBench.minEpochTime()) {
2207 mTargetRuntimePerEpoch = mBench.minEpochTime();
2210 if (isEndlessRunning(mBench.name())) {
2211 std::cerr <<
"NANOBENCH_ENDLESS set: running '" << mBench.name() <<
"' endlessly" << std::endl;
2212 mNumIters = (std::numeric_limits<uint64_t>::max)();
2213 mState = State::endless;
2214 }
else if (0 != mBench.warmup()) {
2215 mNumIters = mBench.warmup();
2216 mState = State::warmup;
2217 }
else if (0 != mBench.epochIterations()) {
2219 mNumIters = mBench.epochIterations();
2220 mState = State::measuring;
2222 mNumIters = mBench.minEpochIterations();
2223 mState = State::upscaling_runtime;
2228 ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters)
noexcept {
2229 auto doubleElapsed = d(elapsed);
2230 auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch);
2231 auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters);
2233 auto doubleMinEpochIters = d(mBench.minEpochIterations());
2234 if (doubleNewIters < doubleMinEpochIters) {
2235 doubleNewIters = doubleMinEpochIters;
2237 doubleNewIters *= 1.0 + 0.2 * mRng.uniform01();
2241 return static_cast<uint64_t
>(doubleNewIters + 0.5);
2245 if (elapsed * 10 < mTargetRuntimePerEpoch) {
2247 if (mNumIters * 10 < mNumIters) {
2249 showResult(
"iterations overflow. Maybe your code got optimized away?");
2255 mNumIters = calcBestNumIters(elapsed, mNumIters);
2259 void add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc)
noexcept {
2260# if defined(ANKERL_NANOBENCH_LOG_ENABLED)
2261 auto oldIters = mNumIters;
2266 if (isCloseEnoughForMeasurements(elapsed)) {
2269 mState = State::measuring;
2270 mNumIters = calcBestNumIters(elapsed, mNumIters);
2273 mState = State::upscaling_runtime;
2278 case State::upscaling_runtime:
2279 if (isCloseEnoughForMeasurements(elapsed)) {
2281 mState = State::measuring;
2282 mTotalElapsed += elapsed;
2283 mTotalNumIters += mNumIters;
2284 mResult.add(elapsed, mNumIters, pc);
2285 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2291 case State::measuring:
2294 mTotalElapsed += elapsed;
2295 mTotalNumIters += mNumIters;
2296 mResult.add(elapsed, mNumIters, pc);
2297 if (0 != mBench.epochIterations()) {
2298 mNumIters = mBench.epochIterations();
2300 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2304 case State::endless:
2305 mNumIters = (std::numeric_limits<uint64_t>::max)();
2309 if (
static_cast<uint64_t
>(mResult.size()) == mBench.epochs()) {
2315 ANKERL_NANOBENCH_LOG(mBench.name() <<
": " << detail::fmt::Number(20, 3, d(elapsed.count())) <<
" elapsed, "
2316 << detail::fmt::Number(20, 3, d(mTargetRuntimePerEpoch.count())) <<
" target. oldIters="
2317 << oldIters <<
", mNumIters=" << mNumIters <<
", mState=" <<
static_cast<int>(mState));
2321 void showResult(std::string
const& errorMessage)
const {
2324 if (mBench.output() !=
nullptr) {
2326 std::vector<fmt::MarkDownColumn> columns;
2330 if (mBench.relative()) {
2332 if (!mBench.results().empty()) {
2335 columns.emplace_back(11, 1,
"relative",
"%", d);
2338 if (mBench.complexityN() > 0) {
2339 columns.emplace_back(14, 0,
"complexityN",
"", mBench.complexityN());
2342 columns.emplace_back(22, 2, mBench.timeUnitName() +
"/" + mBench.unit(),
"",
2343 rMedian / (mBench.timeUnit().count() * mBench.batch()));
2344 columns.emplace_back(22, 2, mBench.unit() +
"/s",
"", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
2347 columns.emplace_back(10, 1,
"err%",
"%", rErrorMedian * 100.0);
2349 double rInsMedian = -1.0;
2352 columns.emplace_back(18, 2,
"ins/" + mBench.unit(),
"", rInsMedian / mBench.batch());
2355 double rCycMedian = -1.0;
2358 columns.emplace_back(18, 2,
"cyc/" + mBench.unit(),
"", rCycMedian / mBench.batch());
2360 if (rInsMedian > 0.0 && rCycMedian > 0.0) {
2361 columns.emplace_back(9, 3,
"IPC",
"", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
2365 columns.emplace_back(17, 2,
"bra/" + mBench.unit(),
"", rBraMedian / mBench.batch());
2368 if (rBraMedian >= 1e-9) {
2371 columns.emplace_back(10, 1,
"miss%",
"%", p);
2378 auto& os = *mBench.output();
2382 hash = hash_combine(std::hash<std::string>{}(mBench.unit()), hash);
2383 hash = hash_combine(std::hash<std::string>{}(mBench.title()), hash);
2384 hash = hash_combine(std::hash<std::string>{}(mBench.timeUnitName()), hash);
2385 hash = hash_combine(std::hash<double>{}(mBench.timeUnit().
count()), hash);
2386 hash = hash_combine(std::hash<bool>{}(mBench.relative()), hash);
2387 hash = hash_combine(std::hash<bool>{}(mBench.performanceCounters()), hash);
2389 if (hash != singletonHeaderHash()) {
2390 singletonHeaderHash() = hash;
2394 for (
auto const& col : columns) {
2397 os <<
"| " << mBench.title() << std::endl;
2399 for (
auto const& col : columns) {
2400 os << col.separator();
2402 os <<
"|:" << std::string(mBench.title().size() + 1U,
'-') << std::endl;
2405 if (!errorMessage.empty()) {
2406 for (
auto const& col : columns) {
2407 os << col.invalid();
2409 os <<
"| :boom: " << fmt::MarkDownCode(mBench.name()) <<
" (" << errorMessage <<
')' << std::endl;
2411 for (
auto const& col : columns) {
2415 auto showUnstable = isWarningsEnabled() && rErrorMedian >= 0.05;
2417 os <<
":wavy_dash: ";
2419 os << fmt::MarkDownCode(mBench.name());
2421 auto avgIters = d(mTotalNumIters) / d(mBench.epochs());
2423 auto suggestedIters =
static_cast<uint64_t
>(avgIters * 10 + 0.5);
2425 os <<
" (Unstable with ~" << detail::fmt::Number(1, 1, avgIters)
2426 <<
" iters. Increase `minEpochIterations` to e.g. " << suggestedIters <<
")";
2433 ANKERL_NANOBENCH(NODISCARD)
bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed)
const noexcept {
2434 return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
2437 uint64_t mNumIters = 1;
2438 Bench
const& mBench;
2439 std::chrono::nanoseconds mTargetRuntimePerEpoch{};
2442 std::chrono::nanoseconds mTotalElapsed{};
2443 uint64_t mTotalNumIters = 0;
2444 State mState = State::upscaling_runtime;
2448IterationLogic::IterationLogic(Bench
const& bench)
2449 : mPimpl(new Impl(bench)) {}
2451IterationLogic::~IterationLogic() {
2455uint64_t IterationLogic::numIters() const noexcept {
2457 return mPimpl->mNumIters;
2460void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc)
noexcept {
2461 mPimpl->add(elapsed, pc);
2464void IterationLogic::moveResultTo(std::vector<Result>& results)
noexcept {
2465 results.emplace_back(std::move(mPimpl->mResult));
2468# if ANKERL_NANOBENCH(PERF_COUNTERS)
2471class LinuxPerformanceCounters {
2474 Target(uint64_t* targetValue_,
bool correctMeasuringOverhead_,
bool correctLoopOverhead_)
2475 : targetValue(targetValue_)
2476 , correctMeasuringOverhead(correctMeasuringOverhead_)
2477 , correctLoopOverhead(correctLoopOverhead_) {}
2479 uint64_t* targetValue{};
2480 bool correctMeasuringOverhead{};
2481 bool correctLoopOverhead{};
2484 LinuxPerformanceCounters() =
default;
2485 LinuxPerformanceCounters(LinuxPerformanceCounters
const&) =
delete;
2486 LinuxPerformanceCounters(LinuxPerformanceCounters&&) =
delete;
2487 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters
const&) =
delete;
2488 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) =
delete;
2489 ~LinuxPerformanceCounters();
2492 inline void start() {}
2494 inline void stop() {}
2496 bool monitor(perf_sw_ids swId, Target target);
2497 bool monitor(perf_hw_id hwId, Target target);
2505 inline void beginMeasure() {
2511 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
2517 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
2520 inline void endMeasure() {
2526 mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
2531 auto const numBytes =
sizeof(uint64_t) * mCounters.size();
2532 auto ret = read(mFd, mCounters.data(), numBytes);
2533 mHasError =
ret !=
static_cast<ssize_t
>(numBytes);
2536 void updateResults(uint64_t numIters);
2539 template <
typename T>
2540 static inline T divRounded(T a, T divisor) {
2541 return (a + divisor / 2) / divisor;
2545 static inline uint32_t mix(uint32_t x) noexcept {
2552 template <
typename Op>
2554 void calibrate(Op&& op) {
2556 for (
auto& v : mCalibratedOverhead) {
2561 auto newCalibration = mCalibratedOverhead;
2562 for (
auto& v : newCalibration) {
2563 v = (std::numeric_limits<uint64_t>::max)();
2565 for (
size_t iter = 0; iter < 100; ++iter) {
2573 for (
size_t i = 0; i < newCalibration.size(); ++i) {
2574 auto diff = mCounters[i];
2575 if (newCalibration[i] > diff) {
2576 newCalibration[i] = diff;
2581 mCalibratedOverhead = std::move(newCalibration);
2588 uint64_t
const numIters = 100000U + (std::random_device{}() & 3U);
2589 uint64_t n = numIters;
2590 uint32_t x = 1234567;
2598 auto measure1 = mCounters;
2609 auto measure2 = mCounters;
2611 for (
size_t i = 0; i < mCounters.size(); ++i) {
2613 auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0;
2614 auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0;
2615 auto overhead = m1 * 2 > m2 ? m1 * 2 - m2 : 0;
2617 mLoopOverhead[i] = divRounded(overhead, numIters);
2623 bool monitor(uint32_t type, uint64_t eventid, Target target);
2625 std::map<uint64_t, Target> mIdToTarget{};
2628 std::vector<uint64_t> mCounters{3};
2629 std::vector<uint64_t> mCalibratedOverhead{3};
2630 std::vector<uint64_t> mLoopOverhead{3};
2632 uint64_t mTimeEnabledNanos = 0;
2633 uint64_t mTimeRunningNanos = 0;
2635 bool mHasError =
false;
2639LinuxPerformanceCounters::~LinuxPerformanceCounters() {
2645bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) {
2646 return monitor(PERF_TYPE_SOFTWARE, swId, target);
2649bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) {
2650 return monitor(PERF_TYPE_HARDWARE, hwId, target);
2655void LinuxPerformanceCounters::updateResults(uint64_t numIters) {
2657 for (
auto& id_value : mIdToTarget) {
2658 *id_value.second.targetValue = UINT64_C(0);
2665 mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1];
2666 mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2];
2668 for (uint64_t i = 0; i < mCounters[0]; ++i) {
2669 auto idx =
static_cast<size_t>(3 + i * 2 + 0);
2670 auto id = mCounters[idx + 1U];
2672 auto it = mIdToTarget.find(
id);
2673 if (it != mIdToTarget.end()) {
2675 auto& tgt = it->second;
2676 *tgt.targetValue = mCounters[idx];
2677 if (tgt.correctMeasuringOverhead) {
2678 if (*tgt.targetValue >= mCalibratedOverhead[idx]) {
2679 *tgt.targetValue -= mCalibratedOverhead[idx];
2681 *tgt.targetValue = 0U;
2684 if (tgt.correctLoopOverhead) {
2685 auto correctionVal = mLoopOverhead[idx] * numIters;
2686 if (*tgt.targetValue >= correctionVal) {
2687 *tgt.targetValue -= correctionVal;
2689 *tgt.targetValue = 0U;
2696bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) {
2697 *target.targetValue = (std::numeric_limits<uint64_t>::max)();
2702 auto pea = perf_event_attr();
2703 std::memset(&pea, 0,
sizeof(perf_event_attr));
2705 pea.size =
sizeof(perf_event_attr);
2706 pea.config = eventid;
2708 pea.exclude_kernel = 1;
2712 pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
2716# if defined(PERF_FLAG_FD_CLOEXEC)
2717 const unsigned long flags = PERF_FLAG_FD_CLOEXEC;
2719 const unsigned long flags = 0;
2723 auto fd =
static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd,
flags));
2733 if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &
id)) {
2739 mIdToTarget.emplace(
id, target);
2742 auto size = 3 + 2 * mIdToTarget.size();
2743 mCounters.resize(size);
2744 mCalibratedOverhead.resize(size);
2745 mLoopOverhead.resize(size);
2750PerformanceCounters::PerformanceCounters()
2751 : mPc(new LinuxPerformanceCounters())
2756 mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles,
true,
false));
2757 if (!mHas.cpuCycles) {
2759 mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles,
true,
false));
2761 mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions,
true,
true));
2762 mHas.branchInstructions =
2763 mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions,
true,
false));
2764 mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses,
true,
false));
2768 mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults,
true,
false));
2769 mHas.contextSwitches =
2770 mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches,
true,
false));
2774 auto before = ankerl::nanobench::Clock::now();
2775 auto after = ankerl::nanobench::Clock::now();
2780 if (mPc->hasError()) {
2782 mHas = PerfCountSet<bool>{};
2786PerformanceCounters::~PerformanceCounters() {
2791void PerformanceCounters::beginMeasure() {
2792 mPc->beginMeasure();
2795void PerformanceCounters::endMeasure() {
2799void PerformanceCounters::updateResults(uint64_t numIters) {
2800 mPc->updateResults(numIters);
2805PerformanceCounters::PerformanceCounters() =
default;
2806PerformanceCounters::~PerformanceCounters() =
default;
2807void PerformanceCounters::beginMeasure() {}
2808void PerformanceCounters::endMeasure() {}
2809void PerformanceCounters::updateResults(uint64_t) {}
2813ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t>
const& PerformanceCounters::val() const noexcept {
2816ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool>
const& PerformanceCounters::has() const noexcept {
2824NumSep::NumSep(
char sep)
2827char NumSep::do_thousands_sep()
const {
2831std::string NumSep::do_grouping()
const {
2836StreamStateRestorer::StreamStateRestorer(std::ostream&
s)
2838 , mLocale(
s.getloc())
2839 , mPrecision(
s.precision())
2842 , mFmtFlags(
s.
flags()) {}
2844StreamStateRestorer::~StreamStateRestorer() {
2849void StreamStateRestorer::restore() {
2850 mStream.imbue(mLocale);
2851 mStream.precision(mPrecision);
2852 mStream.width(mWidth);
2853 mStream.fill(mFill);
2854 mStream.flags(mFmtFlags);
2857Number::Number(
int width,
int precision, int64_t value)
2859 , mPrecision(precision)
2860 , mValue(d(value)) {}
2862Number::Number(
int width,
int precision,
double value)
2864 , mPrecision(precision)
2867std::ostream& Number::write(std::ostream& os)
const {
2868 StreamStateRestorer
const restorer(os);
2869 os.imbue(std::locale(os.getloc(),
new NumSep(
',')));
2870 os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
2874std::string Number::to_s()
const {
2875 std::stringstream ss;
2880std::string to_s(uint64_t n) {
2883 str +=
static_cast<char>(
'0' +
static_cast<char>(n % 10));
2886 std::reverse(str.begin(), str.end());
2890std::ostream&
operator<<(std::ostream& os, Number
const& n) {
2894MarkDownColumn::MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val) noexcept
2897 , mTitle(std::move(tit))
2898 , mSuffix(std::move(suff))
2901std::string MarkDownColumn::title()
const {
2902 std::stringstream ss;
2903 ss <<
'|' << std::setw(mWidth - 2) << std::right << mTitle <<
' ';
2907std::string MarkDownColumn::separator()
const {
2908 std::string sep(
static_cast<size_t>(mWidth),
'-');
2914std::string MarkDownColumn::invalid()
const {
2915 std::string sep(
static_cast<size_t>(mWidth),
' ');
2917 sep[sep.size() - 2] =
'-';
2921std::string MarkDownColumn::value()
const {
2922 std::stringstream ss;
2923 auto width = mWidth - 2 -
static_cast<int>(mSuffix.size());
2924 ss <<
'|' << Number(width, mPrecision, mValue) << mSuffix <<
' ';
2929MarkDownCode::MarkDownCode(std::string
const& what) {
2930 mWhat.reserve(what.size() + 2);
2931 mWhat.push_back(
'`');
2932 for (
char const c : what) {
2935 mWhat.push_back(
'`');
2938 mWhat.push_back(
'`');
2941std::ostream& MarkDownCode::write(std::ostream& os)
const {
2945std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode) {
2946 return mdCode.write(os);
2952Config::Config() =
default;
2953Config::~Config() =
default;
2954Config& Config::operator=(Config
const&) =
default;
2955Config& Config::operator=(Config&&) noexcept(
ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default;
2956Config::Config(Config const&) = default;
2957Config::Config(Config&&) noexcept = default;
2967template <
typename T>
2968inline constexpr typename std::underlying_type<T>::type u(T val)
noexcept {
2969 return static_cast<typename std::underlying_type<T>::type
>(val);
2975 : mConfig(
std::move(benchmarkConfig))
2976 , mNameToMeasurements{
detail::u(
Result::Measure::_size)} {}
2978void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters
const& pc) {
2982 double const dIters = d(iters);
2983 mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
2985 mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
2986 if (pc.has().pageFaults) {
2987 mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters);
2989 if (pc.has().cpuCycles) {
2990 mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters);
2992 if (pc.has().contextSwitches) {
2993 mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters);
2995 if (pc.has().instructions) {
2996 mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters);
2998 if (pc.has().branchInstructions) {
2999 double branchInstructions = 0.0;
3001 if (pc.val().branchInstructions > iters + 1U) {
3002 branchInstructions = d(pc.val().branchInstructions - (iters + 1U));
3004 mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters);
3006 if (pc.has().branchMisses) {
3008 double branchMisses = d(pc.val().branchMisses);
3009 if (branchMisses > branchInstructions) {
3011 branchMisses = branchInstructions;
3015 branchMisses -= 1.0;
3016 if (branchMisses < 1.0) {
3019 mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters);
3024Config
const& Result::config() const noexcept {
3028inline double calcMedian(std::vector<double>&
data) {
3032 std::sort(
data.begin(),
data.end());
3034 auto midIdx =
data.size() / 2U;
3035 if (1U == (
data.size() & 1U)) {
3036 return data[midIdx];
3038 return (
data[midIdx - 1U] +
data[midIdx]) / 2U;
3041double Result::median(Measure m)
const {
3043 auto data = mNameToMeasurements[detail::u(m)];
3044 return calcMedian(
data);
3047double Result::average(Measure m)
const {
3049 auto const&
data = mNameToMeasurements[detail::u(m)];
3055 return sum(m) / d(
data.size());
3058double Result::medianAbsolutePercentError(Measure m)
const {
3060 auto data = mNameToMeasurements[detail::u(m)];
3064 auto med = calcMedian(
data);
3067 for (
auto& x :
data) {
3073 return calcMedian(
data);
3077 auto const&
data = mNameToMeasurements[detail::u(m)];
3078 return std::accumulate(
data.begin(),
data.end(), 0.0);
3081double Result::sumProduct(Measure m1, Measure m2)
const noexcept {
3082 auto const& data1 = mNameToMeasurements[detail::u(m1)];
3083 auto const& data2 = mNameToMeasurements[detail::u(m2)];
3085 if (data1.size() != data2.size()) {
3089 double result = 0.0;
3090 for (
size_t i = 0,
s = data1.size(); i !=
s; ++i) {
3091 result += data1[i] * data2[i];
3096bool Result::has(Measure m)
const noexcept {
3097 return !mNameToMeasurements[detail::u(m)].empty();
3100double Result::get(
size_t idx, Measure m)
const {
3101 auto const&
data = mNameToMeasurements[detail::u(m)];
3102 return data.at(idx);
3105bool Result::empty() const noexcept {
3106 return 0U == size();
3109size_t Result::size() const noexcept {
3110 auto const&
data = mNameToMeasurements[detail::u(Measure::elapsed)];
3114double Result::minimum(Measure m)
const noexcept {
3115 auto const&
data = mNameToMeasurements[detail::u(m)];
3121 return *std::min_element(
data.begin(),
data.end());
3124double Result::maximum(Measure m)
const noexcept {
3125 auto const&
data = mNameToMeasurements[detail::u(m)];
3131 return *std::max_element(
data.begin(),
data.end());
3134std::string
const& Result::context(
char const* variableName)
const {
3135 return mConfig.mContext.at(variableName);
3138std::string
const& Result::context(std::string
const& variableName)
const {
3139 return mConfig.mContext.at(variableName);
3142Result::Measure Result::fromString(std::string
const& str) {
3143 if (str ==
"elapsed") {
3144 return Measure::elapsed;
3146 if (str ==
"iterations") {
3147 return Measure::iterations;
3149 if (str ==
"pagefaults") {
3150 return Measure::pagefaults;
3152 if (str ==
"cpucycles") {
3153 return Measure::cpucycles;
3155 if (str ==
"contextswitches") {
3156 return Measure::contextswitches;
3158 if (str ==
"instructions") {
3159 return Measure::instructions;
3161 if (str ==
"branchinstructions") {
3162 return Measure::branchinstructions;
3164 if (str ==
"branchmisses") {
3165 return Measure::branchmisses;
3168 return Measure::_size;
3173 mConfig.mOut = &std::cout;
3176Bench::Bench(Bench&&) noexcept = default;
3177Bench& Bench::operator=(Bench&&) noexcept(
ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default;
3178Bench::Bench(Bench const&) = default;
3179Bench& Bench::operator=(Bench const&) = default;
3180Bench::~Bench() noexcept = default;
3182double Bench::batch() const noexcept {
3183 return mConfig.mBatch;
3186double Bench::complexityN() const noexcept {
3187 return mConfig.mComplexityN;
3192Bench& Bench::relative(
bool isRelativeEnabled)
noexcept {
3193 mConfig.mIsRelative = isRelativeEnabled;
3196bool Bench::relative() const noexcept {
3197 return mConfig.mIsRelative;
3201 mConfig.mShowPerformanceCounters = showPerformanceCounters;
3205 return mConfig.mShowPerformanceCounters;
3211Bench& Bench::unit(
char const* u) {
3212 if (u != mConfig.mUnit) {
3219Bench& Bench::unit(std::string
const& u) {
3220 return unit(u.c_str());
3223std::string
const& Bench::unit() const noexcept {
3224 return mConfig.mUnit;
3227Bench& Bench::timeUnit(std::chrono::duration<double>
const& tu, std::string
const& tuName) {
3228 mConfig.mTimeUnit = tu;
3229 mConfig.mTimeUnitName = tuName;
3233std::string
const& Bench::timeUnitName() const noexcept {
3234 return mConfig.mTimeUnitName;
3237std::chrono::duration<double>
const& Bench::timeUnit() const noexcept {
3238 return mConfig.mTimeUnit;
3242Bench& Bench::title(
const char* benchmarkTitle) {
3243 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3246 mConfig.mBenchmarkTitle = benchmarkTitle;
3249Bench& Bench::title(std::string
const& benchmarkTitle) {
3250 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3253 mConfig.mBenchmarkTitle = benchmarkTitle;
3257std::string
const& Bench::title() const noexcept {
3258 return mConfig.mBenchmarkTitle;
3262 mConfig.mBenchmarkName = benchmarkName;
3266Bench&
Bench::name(std::string
const& benchmarkName) {
3267 mConfig.mBenchmarkName = benchmarkName;
3272 return mConfig.mBenchmarkName;
3275Bench& Bench::context(
char const* variableName,
char const* variableValue) {
3276 mConfig.mContext[variableName] = variableValue;
3280Bench& Bench::context(std::string
const& variableName, std::string
const& variableValue) {
3281 mConfig.mContext[variableName] = variableValue;
3285Bench& Bench::clearContext() {
3286 mConfig.mContext.clear();
3291Bench& Bench::epochs(
size_t numEpochs)
noexcept {
3292 mConfig.mNumEpochs = numEpochs;
3295size_t Bench::epochs() const noexcept {
3296 return mConfig.mNumEpochs;
3300Bench& Bench::clockResolutionMultiple(
size_t multiple)
noexcept {
3301 mConfig.mClockResolutionMultiple = multiple;
3304size_t Bench::clockResolutionMultiple() const noexcept {
3305 return mConfig.mClockResolutionMultiple;
3309Bench& Bench::maxEpochTime(std::chrono::nanoseconds t)
noexcept {
3310 mConfig.mMaxEpochTime =
t;
3313std::chrono::nanoseconds Bench::maxEpochTime() const noexcept {
3314 return mConfig.mMaxEpochTime;
3318Bench& Bench::minEpochTime(std::chrono::nanoseconds t)
noexcept {
3319 mConfig.mMinEpochTime =
t;
3322std::chrono::nanoseconds Bench::minEpochTime() const noexcept {
3323 return mConfig.mMinEpochTime;
3326Bench& Bench::minEpochIterations(uint64_t numIters)
noexcept {
3327 mConfig.mMinEpochIterations = (numIters == 0) ? 1 : numIters;
3330uint64_t Bench::minEpochIterations() const noexcept {
3331 return mConfig.mMinEpochIterations;
3334Bench& Bench::epochIterations(uint64_t numIters)
noexcept {
3335 mConfig.mEpochIterations = numIters;
3338uint64_t Bench::epochIterations() const noexcept {
3339 return mConfig.mEpochIterations;
3342Bench& Bench::warmup(uint64_t numWarmupIters)
noexcept {
3343 mConfig.mWarmup = numWarmupIters;
3346uint64_t Bench::warmup() const noexcept {
3347 return mConfig.mWarmup;
3350Bench& Bench::config(Config
const& benchmarkConfig) {
3351 mConfig = benchmarkConfig;
3354Config
const& Bench::config() const noexcept {
3358Bench& Bench::output(std::ostream* outstream)
noexcept {
3359 mConfig.mOut = outstream;
3364 return mConfig.mOut;
3367std::vector<Result>
const& Bench::results() const noexcept {
3371Bench&
Bench::render(
char const* templateContent, std::ostream& os) {
3376Bench&
Bench::render(std::string
const& templateContent, std::ostream& os) {
3381std::vector<BigO> Bench::complexityBigO()
const {
3382 std::vector<BigO> bigOs;
3383 auto rangeMeasure = BigO::collectRangeMeasure(mResults);
3384 bigOs.emplace_back(
"O(1)", rangeMeasure, [](
double) {
3387 bigOs.emplace_back(
"O(n)", rangeMeasure, [](
double n) {
3390 bigOs.emplace_back(
"O(log n)", rangeMeasure, [](
double n) {
3391 return std::log2(n);
3393 bigOs.emplace_back(
"O(n log n)", rangeMeasure, [](
double n) {
3394 return n * std::log2(n);
3396 bigOs.emplace_back(
"O(n^2)", rangeMeasure, [](
double n) {
3399 bigOs.emplace_back(
"O(n^3)", rangeMeasure, [](
double n) {
3402 std::sort(bigOs.begin(), bigOs.end());
3409 std::random_device rd;
3410 std::uniform_int_distribution<uint64_t> dist;
3414 }
while (mX == 0 && mY == 0);
3418uint64_t splitMix64(uint64_t& state) noexcept {
3419 uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15));
3420 z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
3421 z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
3422 return z ^ (z >> 31U);
3426Rng::Rng(uint64_t seed) noexcept
3427 : mX(splitMix64(seed))
3428 , mY(splitMix64(seed)) {
3429 for (
size_t i = 0; i < 10; ++i) {
3435Rng::Rng(uint64_t x, uint64_t y) noexcept
3439Rng Rng::copy() const noexcept {
3443Rng::Rng(std::vector<uint64_t>
const&
data)
3446 if (
data.size() != 2) {
3447 throw std::runtime_error(
"ankerl::nanobench::Rng::Rng: needed exactly 2 entries in data, but got " +
3448 detail::fmt::to_s(
data.size()));
3454std::vector<uint64_t> Rng::state()
const {
3455 std::vector<uint64_t>
data(2);
3461BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result>
const& results) {
3462 BigO::RangeMeasure rangeMeasure;
3463 for (
auto const& result : results) {
3464 if (result.config().mComplexityN > 0.0) {
3465 rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed));
3468 return rangeMeasure;
3471BigO::BigO(std::string bigOName, RangeMeasure
const& rangeMeasure)
3472 : mName(
std::move(bigOName)) {
3475 double sumRangeMeasure = 0.0;
3476 double sumRangeRange = 0.0;
3478 for (
const auto& rm : rangeMeasure) {
3479 sumRangeMeasure += rm.first * rm.second;
3480 sumRangeRange += rm.first * rm.first;
3482 mConstant = sumRangeMeasure / sumRangeRange;
3486 double sumMeasure = 0.0;
3487 for (
const auto& rm : rangeMeasure) {
3488 auto diff = mConstant * rm.first - rm.second;
3491 sumMeasure += rm.second;
3494 auto n = detail::d(rangeMeasure.size());
3495 auto mean = sumMeasure / n;
3496 mNormalizedRootMeanSquare = std::sqrt(err / n) / mean;
3499BigO::BigO(
const char* bigOName, RangeMeasure
const& rangeMeasure)
3500 : BigO(
std::string(bigOName), rangeMeasure) {}
3502std::string
const&
BigO::name() const noexcept {
3506double BigO::constant() const noexcept {
3510double BigO::normalizedRootMeanSquare() const noexcept {
3511 return mNormalizedRootMeanSquare;
3515 return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName);
3518std::ostream&
operator<<(std::ostream& os, BigO
const& bigO) {
3519 return os << bigO.constant() <<
" * " << bigO.name() <<
", rms=" << bigO.normalizedRootMeanSquare();
3522std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs) {
3523 detail::fmt::StreamStateRestorer
const restorer(os);
3524 os << std::endl <<
"| coefficient | err% | complexity" << std::endl <<
"|--------------:|-------:|------------" << std::endl;
3525 for (
auto const& bigO : bigOs) {
3526 os <<
"|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() <<
" ";
3527 os <<
"|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) <<
"% ";
3528 os <<
"| " << bigO.name();
Main entry point to nanobench's benchmarking facility.
Bench & operator=(Bench const &other)
ANKERL_NANOBENCH(NODISCARD) std Bench & doNotOptimizeAway(Arg &&arg)
Retrieves all benchmark results collected by the bench object so far.
Bench & run(char const *benchmarkName, Op &&op)
Repeatedly calls op() based on the configuration, and performs measurements.
Bench & batch(T b) noexcept
Sets the batch size.
std::vector< BigO > complexityBigO() const
Bench()
Creates a new benchmark for configuration and running of benchmarks.
Bench & operator=(Bench &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
Bench & runImpl(SetupOp &setupOp, Op &&op)
detail::SetupRunner< SetupOp > setup(SetupOp setupOp)
Configure an untimed setup step per epoch (fluent API).
Bench(Bench &&other) noexcept
Bench(Bench const &other)
Bench & complexityN(T n) noexcept
static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op)
BigO(std::string bigOName, RangeMeasure const &scaledRangeMeasure)
std::vector< std::pair< double, double > > RangeMeasure
BigO(char const *bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
static RangeMeasure collectRangeMeasure(std::vector< Result > const &results)
BigO(std::string bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
BigO(char const *bigOName, RangeMeasure const &scaledRangeMeasure)
Result(Config benchmarkConfig)
static Measure fromString(std::string const &str)
void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const &pc)
Result(Result &&other) noexcept
ANKERL_NANOBENCH(NODISCARD) Config const &config() const noexcept
Result & operator=(Result const &other)
Result(Result const &other)
Result & operator=(Result &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
An extremely fast random generator.
static constexpr uint64_t() min()
Rng(Rng const &)=delete
As a safety precaution, we don't allow copying.
void shuffle(Container &container) noexcept
Shuffles all entries in the given container.
Rng(Rng &&) noexcept=default
Rng & operator=(Rng const &)=delete
Same as Rng(Rng const&), we don't allow assignment.
static constexpr uint64_t() max()
double uniform01() noexcept
Provides a random uniform double value between 0 and 1.
uint64_t result_type
This RNG provides 64bit randomness.
void moveResultTo(std::vector< Result > &results) noexcept
void add(std::chrono::nanoseconds elapsed, PerformanceCounters const &pc) noexcept
IterationLogic(IterationLogic &&)=delete
IterationLogic & operator=(IterationLogic const &)=delete
ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept
IterationLogic(IterationLogic const &)=delete
IterationLogic(Bench const &bench)
IterationLogic & operator=(IterationLogic &&)=delete
SetupRunner(SetupOp setupOp, Bench &bench)
#define T(expected, seed, data)
void doNotOptimizeAway(T &val)
PerformanceCounters & performanceCounters()
void doNotOptimizeAway(T const &val)
char const * json() noexcept
Template to generate JSON data.
char const * csv() noexcept
CSV data for the benchmark results.
char const * pyperf() noexcept
Output in pyperf compatible JSON format, which can be used for more analyzation.
char const * htmlBoxplot() noexcept
HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an exampl...
void render(char const *mustacheTemplate, Bench const &bench, std::ostream &out)
Renders output from a mustache-like template and benchmark results.
std::conditional< std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock >::type Clock
void render(std::string const &mustacheTemplate, std::vector< Result > const &results, std::ostream &out)
std::ostream & operator<<(std::ostream &os, BigO const &bigO)
std::ostream & operator<<(std::ostream &os, std::vector< ankerl::nanobench::BigO > const &bigOs)
void doNotOptimizeAway(Arg &&arg)
Makes sure none of the given arguments are optimized away by the compiler.
#define ANKERL_NANOBENCH_LOG(x)
#define ANKERL_NANOBENCH_NO_SANITIZE(...)
#define ANKERL_NANOBENCH(x)
bool operator==(const CNetAddr &a, const CNetAddr &b)
bool operator<(const CNetAddr &a, const CNetAddr &b)
Config & operator=(Config const &other)
Config(Config const &other)
Config & operator=(Config &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
Config(Config &&other) noexcept
static SECP256K1_INLINE uint64_t rotl(const uint64_t x, int k)