30#ifndef ANKERL_NANOBENCH_H_INCLUDED
31#define ANKERL_NANOBENCH_H_INCLUDED
34#define ANKERL_NANOBENCH_VERSION_MAJOR 4
35#define ANKERL_NANOBENCH_VERSION_MINOR 3
36#define ANKERL_NANOBENCH_VERSION_PATCH 11
47#include <unordered_map>
50#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
52#define ANKERL_NANOBENCH_PRIVATE_CXX() __cplusplus
53#define ANKERL_NANOBENCH_PRIVATE_CXX98() 199711L
54#define ANKERL_NANOBENCH_PRIVATE_CXX11() 201103L
55#define ANKERL_NANOBENCH_PRIVATE_CXX14() 201402L
56#define ANKERL_NANOBENCH_PRIVATE_CXX17() 201703L
58#if ANKERL_NANOBENCH(CXX) >= ANKERL_NANOBENCH(CXX17)
59# define ANKERL_NANOBENCH_PRIVATE_NODISCARD() [[nodiscard]]
61# define ANKERL_NANOBENCH_PRIVATE_NODISCARD()
65# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH() \
66 _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wpadded\"")
67# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP() _Pragma("clang diagnostic pop")
69# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_PUSH()
70# define ANKERL_NANOBENCH_PRIVATE_IGNORE_PADDED_POP()
74# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH() _Pragma("GCC diagnostic push") _Pragma("GCC diagnostic ignored \"-Weffc++\"")
75# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP() _Pragma("GCC diagnostic pop")
77# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_PUSH()
78# define ANKERL_NANOBENCH_PRIVATE_IGNORE_EFFCPP_POP()
81#if defined(ANKERL_NANOBENCH_LOG_ENABLED)
83# define ANKERL_NANOBENCH_LOG(x) \
85 std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << x << std::endl; \
88# define ANKERL_NANOBENCH_LOG(x) \
93#define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
94#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
95# include <linux/version.h>
96# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
99# undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS
100# define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 1
104#if defined(__clang__)
105# define ANKERL_NANOBENCH_NO_SANITIZE(...) __attribute__((no_sanitize(__VA_ARGS__)))
107# define ANKERL_NANOBENCH_NO_SANITIZE(...)
111# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __declspec(noinline)
113# define ANKERL_NANOBENCH_PRIVATE_NOINLINE() __attribute__((noinline))
118#if defined(__GNUC__) && __GNUC__ < 5
119# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
121# define ANKERL_NANOBENCH_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
126#define ANKERL_NANOBENCH_PRIVATE_NOEXCEPT_STRING_MOVE() std::is_nothrow_move_assignable<std::string>::value
133using Clock = std::conditional<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock,
134 std::chrono::steady_clock>::type;
142template <
typename SetupOp>
297void render(std::string
const& mustacheTemplate,
Bench const& bench, std::ostream&
out);
307void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out);
308void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out);
322char const*
csv() noexcept;
363class PerformanceCounters;
365#if ANKERL_NANOBENCH(PERF_COUNTERS)
366class LinuxPerformanceCounters;
394 std::string mBenchmarkTitle =
"benchmark";
395 std::string mBenchmarkName =
"noname";
396 std::string mUnit =
"op";
398 double mComplexityN = -1.0;
399 size_t mNumEpochs = 11;
400 size_t mClockResolutionMultiple =
static_cast<size_t>(1000);
401 std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
402 std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1);
403 uint64_t mMinEpochIterations{1};
405 uint64_t mEpochIterations{0};
406 uint64_t mWarmup = 0;
407 std::ostream* mOut =
nullptr;
408 std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1};
409 std::string mTimeUnitName =
"ns";
410 bool mShowPerformanceCounters =
true;
411 bool mIsRelative =
false;
412 std::unordered_map<std::string, std::string> mContext{};
473 std::vector<std::vector<double>> mNameToMeasurements{};
501 static constexpr uint64_t(min)();
502 static constexpr uint64_t(max)();
518 Rng& operator=(
Rng&&) noexcept = default;
519 ~
Rng() noexcept = default;
546 explicit
Rng(uint64_t seed) noexcept;
547 Rng(uint64_t x, uint64_t y) noexcept;
562 inline uint64_t operator()() noexcept;
580 inline uint32_t bounded(uint32_t range) noexcept;
591 inline
double uniform01() noexcept;
600 template <typename Container>
601 void shuffle(Container& container) noexcept;
612 static constexpr uint64_t
rotl(uint64_t x,
unsigned k) noexcept;
664 template <typename Op>
666 Bench& run(
char const* benchmarkName, Op&& op);
668 template <typename Op>
670 Bench& run(
std::
string const& benchmarkName, Op&& op);
676 template <typename Op>
685 Bench& title(
char const* benchmarkTitle);
710 Bench& context(
char const* variableName,
char const* variableValue);
711 Bench& context(
std::
string const& variableName,
std::
string const& variableValue);
732 template <typename T>
733 Bench& batch(T b) noexcept;
757 Bench& timeUnit(
std::chrono::duration<
double> const& tu,
std::
string const& tuName);
759 ANKERL_NANOBENCH(NODISCARD)
std::chrono::duration<
double> const& timeUnit() const noexcept;
791 Bench& clockResolutionMultiple(
size_t multiple) noexcept;
809 Bench& epochs(
size_t numEpochs) noexcept;
822 Bench& maxEpochTime(
std::chrono::nanoseconds t) noexcept;
835 Bench& minEpochTime(
std::chrono::nanoseconds t) noexcept;
848 Bench& minEpochIterations(uint64_t numIters) noexcept;
857 Bench& epochIterations(uint64_t numIters) noexcept;
869 Bench& warmup(uint64_t numWarmupIters) noexcept;
889 Bench& relative(
bool isRelativeEnabled) noexcept;
920 template <typename Arg>
937 template <typename T>
938 Bench& complexityN(T n) noexcept;
997 template <typename Op>
998 BigO complexityBigO(
char const*
name, Op op) const;
1000 template <typename Op>
1001 BigO complexityBigO(
std::
string const&
name, Op op) const;
1021 template <typename SetupOp>
1022 detail::SetupRunner<SetupOp>
setup(SetupOp setupOp);
1025 template <typename SetupOp, typename Op>
1026 Bench& runImpl(SetupOp& setupOp, Op&& op);
1028 template <typename SetupOp>
1032 std::vector<Result> mResults{};
1042template <
typename Arg>
1047#if defined(_MSC_VER)
1048void doNotOptimizeAwaySink(
void const*);
1050template <
typename T>
1058template <
typename T>
1061 asm volatile(
"" : :
"r,m"(val) :
"memory");
1064template <
typename T>
1066# if defined(__clang__)
1068 asm volatile(
"" :
"+r,m"(val) : :
"memory");
1071 asm volatile(
"" :
"+m,r"(val) : :
"memory");
1118#if ANKERL_NANOBENCH(PERF_COUNTERS)
1119 LinuxPerformanceCounters* mPc =
nullptr;
1135 template <
typename Op>
1137 for (
auto& rangeMeasure :
data) {
1138 rangeMeasure.first = op(rangeMeasure.first);
1145 template <
typename Op>
1147 :
BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
1149 template <
typename Op>
1151 :
BigO(
std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {}
1163 double mNormalizedRootMeanSquare{};
1166std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs);
1174namespace nanobench {
1181 return (std::numeric_limits<uint64_t>::max)();
1185uint64_t
Rng::operator()() noexcept {
1188 mX = UINT64_C(15241094284759029579) * mY;
1189 mY =
rotl(mY - x, 27);
1195uint32_t
Rng::bounded(uint32_t range) noexcept {
1196 uint64_t
const r32 =
static_cast<uint32_t
>(operator()());
1197 auto multiresult = r32 * range;
1198 return static_cast<uint32_t
>(multiresult >> 32U);
1202 auto i = (UINT64_C(0x3ff) << 52U) | (
operator()() >> 12U);
1206 std::memcpy(&d, &i,
sizeof(
double));
1210template <
typename Container>
1212 auto i = container.size();
1215 auto n = operator()();
1217 auto b1 =
static_cast<decltype(i)
>((
static_cast<uint32_t
>(n) *
static_cast<uint64_t
>(i)) >> 32U);
1218 swap(container[--i], container[b1]);
1220 auto b2 =
static_cast<decltype(i)
>(((n >> 32U) *
static_cast<uint64_t
>(i)) >> 32U);
1221 swap(container[--i], container[b2]);
1226constexpr uint64_t
Rng::
rotl(uint64_t x,
unsigned k) noexcept {
1227 return (x <<
k) | (x >> (64U -
k));
1232template <
typename SetupOp>
1236 : mSetupOp(
std::move(setupOp))
1239 template <
typename Op>
1242 assert((mBench.epochIterations() <= 1) &&
1243 "setup() runs once per epoch, not once per iteration; it requires epochIterations(1)");
1244 mBench.epochIterations(1);
1245 return mBench.runImpl(mSetupOp, std::forward<Op>(op));
1254template <
typename Op>
1257 auto setupOp = [] {};
1258 return runImpl(setupOp, std::forward<Op>(op));
1261template <
typename SetupOp,
typename Op>
1268 while (
auto n = iterationLogic.numIters()) {
1272 Clock::time_point
const before = Clock::now();
1276 Clock::time_point
const after = Clock::now();
1278 pc.updateResults(iterationLogic.numIters());
1279 iterationLogic.
add(after - before, pc);
1285template <
typename SetupOp>
1291template <
typename Op>
1293 name(benchmarkName);
1294 return run(std::forward<Op>(op));
1297template <
typename Op>
1299 name(benchmarkName);
1300 return run(std::forward<Op>(op));
1303template <
typename Op>
1308template <
typename Op>
1315template <
typename T>
1317 mConfig.mBatch =
static_cast<double>(b);
1322template <
typename T>
1324 mConfig.mComplexityN =
static_cast<double>(n);
1329template <
typename Arg>
1336template <
typename Arg>
1343#if defined(_MSC_VER)
1344template <
typename T>
1346 doNotOptimizeAwaySink(&val);
1355#if defined(ANKERL_NANOBENCH_IMPLEMENT)
1361# include <algorithm>
1371# include <stdexcept>
1373# if defined(__linux__)
1376# if ANKERL_NANOBENCH(PERF_COUNTERS)
1379# include <linux/perf_event.h>
1380# include <sys/ioctl.h>
1381# include <sys/syscall.h>
1387namespace nanobench {
1398class StreamStateRestorer;
1400class MarkDownColumn;
1411namespace nanobench {
1413uint64_t splitMix64(uint64_t& state)
noexcept;
// Shorthand: converts any arithmetic value to double.
template <typename T>
inline double d(T t) noexcept {
    return static_cast<double>(t);
}
1422inline double d(Clock::duration duration)
noexcept {
1423 return std::chrono::duration_cast<std::chrono::duration<double>>(duration).
count();
1427inline Clock::duration clockResolution() noexcept;
1431namespace templates {
1433char const*
csv() noexcept {
1434 return R
"DELIM("title";"name";"unit";"batch";"elapsed";"error %";"instructions";"branches";"branch misses";"total"
1435{{#result}}"{{title}}";"{{name}}";"{{unit}}";{{batch}};{{median(elapsed)}};{{medianAbsolutePercentError(elapsed)}};{{median(instructions)}};{{median(branchinstructions)}};{{median(branchmisses)}};{{sumProduct(iterations, elapsed)}}
1440 return R
"DELIM(<html>
1443 <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
1447 <div id="myDiv"></div>
1452 y: [{{#measurement}}{{elapsed}}{{^-last}}, {{/last}}{{/measurement}}],
1456 var title = '{{title}}';
1458 data = data.map(a => Object.assign(a, { boxpoints: 'all', pointpos: 0, type: 'box' }));
1459 var layout = { title: { text: title }, showlegend: false, yaxis: { title: 'time per unit', rangemode: 'tozero', autorange: true } }; Plotly.newPlot('myDiv', data, layout, {responsive: true});
1466char const*
pyperf() noexcept {
1473{{#measurement}} {{elapsed}}{{^-last}},
1474{{/last}}{{/measurement}}
1481 "loops": {{sum(iterations)}},
1482 "inner_loops": {{batch}},
1483 "name": "{{title}}",
1490char const*
json() noexcept {
1494 "title": "{{title}}",
1498 "complexityN": {{complexityN}},
1499 "epochs": {{epochs}},
1500 "clockResolution": {{clockResolution}},
1501 "clockResolutionMultiple": {{clockResolutionMultiple}},
1502 "maxEpochTime": {{maxEpochTime}},
1503 "minEpochTime": {{minEpochTime}},
1504 "minEpochIterations": {{minEpochIterations}},
1505 "epochIterations": {{epochIterations}},
1506 "warmup": {{warmup}},
1507 "relative": {{relative}},
1508 "median(elapsed)": {{median(elapsed)}},
1509 "medianAbsolutePercentError(elapsed)": {{medianAbsolutePercentError(elapsed)}},
1510 "median(instructions)": {{median(instructions)}},
1511 "medianAbsolutePercentError(instructions)": {{medianAbsolutePercentError(instructions)}},
1512 "median(cpucycles)": {{median(cpucycles)}},
1513 "median(contextswitches)": {{median(contextswitches)}},
1514 "median(pagefaults)": {{median(pagefaults)}},
1515 "median(branchinstructions)": {{median(branchinstructions)}},
1516 "median(branchmisses)": {{median(branchmisses)}},
1517 "totalTime": {{sumProduct(iterations, elapsed)}},
1520 "iterations": {{iterations}},
1521 "elapsed": {{elapsed}},
1522 "pagefaults": {{pagefaults}},
1523 "cpucycles": {{cpucycles}},
1524 "contextswitches": {{contextswitches}},
1525 "instructions": {{instructions}},
1526 "branchinstructions": {{branchinstructions}},
1527 "branchmisses": {{branchmisses}}
1528 }{{^-last}},{{/-last}}
1530 }{{^-last}},{{/-last}}
1537 enum class Type { tag, content, section, inverted_section };
1541 std::vector<Node> children;
1546 bool operator==(
char const (&str)[N])
const noexcept {
1548 return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
1554static std::vector<Node> parseMustacheTemplate(
char const** tpl) {
1555 std::vector<Node> nodes;
1558 auto const* begin = std::strstr(*tpl,
"{{");
1559 auto const* end = begin;
1560 if (begin !=
nullptr) {
1563 end = std::strstr(begin,
"}}");
1566 if (begin ==
nullptr || end ==
nullptr) {
1569 nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
1574 nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
1586 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
1591 nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
1595 nodes.emplace_back(Node{begin, end, std::vector<Node>{}, Node::Type::tag});
1601static bool generateFirstLast(Node
const& n,
size_t idx,
size_t size, std::ostream&
out) {
1603 bool const matchFirst = n ==
"-first";
1604 bool const matchLast = n ==
"-last";
1605 if (!matchFirst && !matchLast) {
1609 bool doWrite =
false;
1610 if (n.type == Node::Type::section) {
1611 doWrite = (matchFirst && idx == 0) || (matchLast && idx == size - 1);
1612 }
else if (n.type == Node::Type::inverted_section) {
1613 doWrite = (matchFirst && idx != 0) || (matchLast && idx != size - 1);
1617 for (
auto const& child : n.children) {
1618 if (child.type == Node::Type::content) {
1619 out.write(child.begin, std::distance(child.begin, child.end));
1626static bool matchCmdArgs(std::string
const& str, std::vector<std::string>& matchResult) {
1627 matchResult.clear();
1628 auto idxOpen = str.find(
'(');
1629 auto idxClose = str.find(
')', idxOpen);
1630 if (idxClose == std::string::npos) {
1634 matchResult.emplace_back(str.substr(0, idxOpen));
1637 matchResult.emplace_back();
1638 for (
size_t i = idxOpen + 1; i != idxClose; ++i) {
1639 if (str[i] ==
' ' || str[i] ==
'\t') {
1643 if (str[i] ==
',') {
1645 matchResult.emplace_back();
1649 matchResult.back() += str[i];
1654static bool generateConfigTag(Node
const& n, Config
const& config, std::ostream&
out) {
1658 out << config.mBenchmarkTitle;
1662 out << config.mBenchmarkName;
1666 out << config.mUnit;
1670 out << config.mBatch;
1673 if (n ==
"complexityN") {
1674 out << config.mComplexityN;
1677 if (n ==
"epochs") {
1678 out << config.mNumEpochs;
1681 if (n ==
"clockResolution") {
1682 out << d(detail::clockResolution());
1685 if (n ==
"clockResolutionMultiple") {
1686 out << config.mClockResolutionMultiple;
1689 if (n ==
"maxEpochTime") {
1690 out << d(config.mMaxEpochTime);
1693 if (n ==
"minEpochTime") {
1694 out << d(config.mMinEpochTime);
1697 if (n ==
"minEpochIterations") {
1698 out << config.mMinEpochIterations;
1701 if (n ==
"epochIterations") {
1702 out << config.mEpochIterations;
1705 if (n ==
"warmup") {
1706 out << config.mWarmup;
1709 if (n ==
"relative") {
1710 out << config.mIsRelative;
1717static std::ostream& generateResultTag(Node
const& n,
Result const& r, std::ostream&
out) {
1718 if (generateConfigTag(n, r.config(),
out)) {
1726 std::vector<std::string> matchResult;
1727 if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
1728 if (matchResult.size() == 2) {
1729 if (matchResult[0] ==
"context") {
1730 return out << r.context(matchResult[1]);
1738 if (matchResult[0] ==
"median") {
1739 return out << r.median(m);
1741 if (matchResult[0] ==
"average") {
1742 return out << r.average(m);
1744 if (matchResult[0] ==
"medianAbsolutePercentError") {
1745 return out << r.medianAbsolutePercentError(m);
1747 if (matchResult[0] ==
"sum") {
1748 return out << r.sum(m);
1750 if (matchResult[0] ==
"minimum") {
1751 return out << r.minimum(m);
1753 if (matchResult[0] ==
"maximum") {
1754 return out << r.maximum(m);
1756 }
else if (matchResult.size() == 3) {
1763 if (matchResult[0] ==
"sumProduct") {
1764 return out << r.sumProduct(m1, m2);
1773 throw std::runtime_error(
"command '" + std::string(n.begin, n.end) +
"' not understood");
1776static void generateResultMeasurement(std::vector<Node>
const& nodes,
size_t idx,
Result const& r, std::ostream&
out) {
1777 for (
auto const& n : nodes) {
1778 if (!generateFirstLast(n, idx, r.size(),
out)) {
1781 case Node::Type::content:
1782 out.write(n.begin, std::distance(n.begin, n.end));
1785 case Node::Type::inverted_section:
1786 throw std::runtime_error(
"got a inverted section inside measurement");
1788 case Node::Type::section:
1789 throw std::runtime_error(
"got a section inside measurement");
1791 case Node::Type::tag: {
1796 out << r.get(idx, m);
1805static void generateResult(std::vector<Node>
const& nodes,
size_t idx, std::vector<Result>
const& results, std::ostream&
out) {
1806 auto const& r = results[idx];
1807 for (
auto const& n : nodes) {
1808 if (!generateFirstLast(n, idx, results.size(),
out)) {
1811 case Node::Type::content:
1812 out.write(n.begin, std::distance(n.begin, n.end));
1815 case Node::Type::inverted_section:
1816 throw std::runtime_error(
"got a inverted section inside result");
1818 case Node::Type::section:
1819 if (n ==
"measurement") {
1820 for (
size_t i = 0; i < r.size(); ++i) {
1821 generateResultMeasurement(n.children, i, r,
out);
1824 throw std::runtime_error(
"got a section inside result");
1828 case Node::Type::tag:
1829 generateResultTag(n, r,
out);
1841char const* getEnv(
char const*
name);
1842bool isEndlessRunning(std::string
const&
name);
1843bool isWarningsEnabled();
1845template <
typename T>
1846T parseFile(std::string
const& filename,
bool* fail);
1848void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
1849void printStabilityInformationOnce(std::ostream* outStream);
1852uint64_t& singletonHeaderHash() noexcept;
1855Clock::duration calcClockResolution(
size_t numEvaluations) noexcept;
1862class NumSep :
public std::numpunct<char> {
1864 explicit NumSep(
char sep);
1865 char do_thousands_sep()
const override;
1866 std::string do_grouping()
const override;
1875class StreamStateRestorer {
1877 explicit StreamStateRestorer(std::ostream&
s);
1878 ~StreamStateRestorer();
1884 StreamStateRestorer(StreamStateRestorer
const&) =
delete;
1885 StreamStateRestorer& operator=(StreamStateRestorer
const&) =
delete;
1886 StreamStateRestorer(StreamStateRestorer&&) =
delete;
1887 StreamStateRestorer& operator=(StreamStateRestorer&&) =
delete;
1890 std::ostream& mStream;
1891 std::locale mLocale;
1892 std::streamsize
const mPrecision;
1893 std::streamsize
const mWidth;
1894 std::ostream::char_type
const mFill;
1895 std::ostream::fmtflags
const mFmtFlags;
1902 Number(
int width,
int precision,
double value);
1903 Number(
int width,
int precision, int64_t value);
1907 friend std::ostream&
operator<<(std::ostream& os, Number
const& n);
1908 std::ostream& write(std::ostream& os)
const;
1916std::string to_s(uint64_t n);
1918std::ostream&
operator<<(std::ostream& os, Number
const& n);
1920class MarkDownColumn {
1922 MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val)
noexcept;
1932 std::string mSuffix;
1939 explicit MarkDownCode(std::string
const& what);
1942 friend std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1943 std::ostream& write(std::ostream& os)
const;
1945 std::string mWhat{};
1948std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode);
1958namespace nanobench {
1961void render(
char const* mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out) {
1962 detail::fmt::StreamStateRestorer
const restorer(
out);
1964 out.precision(std::numeric_limits<double>::digits10);
1965 auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
1967 for (
auto const& n : nodes) {
1970 case templates::Node::Type::content:
1971 out.write(n.begin, std::distance(n.begin, n.end));
1974 case templates::Node::Type::inverted_section:
1975 throw std::runtime_error(
"unknown list '" + std::string(n.begin, n.end) +
"'");
1977 case templates::Node::Type::section:
1978 if (n ==
"result") {
1979 const size_t nbResults = results.size();
1980 for (
size_t i = 0; i < nbResults; ++i) {
1981 generateResult(n.children, i, results,
out);
1983 }
else if (n ==
"measurement") {
1984 if (results.size() != 1) {
1985 throw std::runtime_error(
1986 "render: can only use section 'measurement' here if there is a single result, but there are " +
1987 detail::fmt::to_s(results.size()));
1990 auto const& r = results.front();
1991 for (
size_t i = 0; i < r.size(); ++i) {
1992 generateResultMeasurement(n.children, i, r,
out);
1995 throw std::runtime_error(
"render: unknown section '" + std::string(n.begin, n.end) +
"'");
1999 case templates::Node::Type::tag:
2000 if (results.size() == 1) {
2002 generateResultTag(n, results.front(),
out);
2005 if (!generateConfigTag(n, results.back().config(),
out)) {
2006 throw std::runtime_error(
"unknown tag '" + std::string(n.begin, n.end) +
"'");
2014void render(std::string
const& mustacheTemplate, std::vector<Result>
const& results, std::ostream&
out) {
2015 render(mustacheTemplate.c_str(), results,
out);
2018void render(
char const* mustacheTemplate,
const Bench& bench, std::ostream&
out) {
2019 render(mustacheTemplate, bench.results(),
out);
2022void render(std::string
const& mustacheTemplate,
const Bench& bench, std::ostream&
out) {
2023 render(mustacheTemplate.c_str(), bench.results(),
out);
2029# if defined(__clang__)
2030# pragma clang diagnostic push
2031# pragma clang diagnostic ignored "-Wexit-time-destructors"
2033 static PerformanceCounters pc;
2034# if defined(__clang__)
2035# pragma clang diagnostic pop
#    if defined(_MSC_VER)
#        pragma optimize("", off)
// Sink for doNotOptimizeAway() on MSVC: an opaque, never-optimized function
// call that forces the compiler to materialize its argument.
void doNotOptimizeAwaySink(void const*) {}
#        pragma optimize("", on)
#    endif
// Reads a single whitespace-delimited value of type T from the given file.
// On any failure the result is value-initialized (T{}); if `fail` is
// non-null it receives whether the read failed.
template <typename T>
T parseFile(std::string const& filename, bool* fail) {
    std::ifstream fin(filename);
    T num{};
    fin >> num;
    if (fail != nullptr) {
        *fail = fin.fail();
    }
    return num;
}
// Thin std::getenv wrapper; silences MSVC's C4996 "unsafe function" warning
// around the call. Returns nullptr when the variable is not set.
char const* getEnv(char const* name) {
#    if defined(_MSC_VER)
#        pragma warning(push)
#        pragma warning(disable : 4996) // std::getenv is fine here
#    endif
    return std::getenv(name);
#    if defined(_MSC_VER)
#        pragma warning(pop)
#    endif
}
2072bool isEndlessRunning(std::string
const&
name) {
2073 auto const*
const endless = getEnv(
"NANOBENCH_ENDLESS");
2074 return nullptr != endless && endless ==
name;
2078bool isWarningsEnabled() {
2079 auto const*
const suppression = getEnv(
"NANOBENCH_SUPPRESS_WARNINGS");
2080 return nullptr == suppression || suppression == std::string(
"0");
2083void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations) {
2085 recommendations.clear();
2088 warnings.emplace_back(
"DEBUG defined");
2089 bool const recommendCheckFlags =
true;
2091 bool const recommendCheckFlags =
false;
2094 bool recommendPyPerf =
false;
2095# if defined(__linux__)
2096 auto nprocs = sysconf(_SC_NPROCESSORS_CONF);
2098 warnings.emplace_back(
"couldn't figure out number of processors - no governor, turbo check possible");
2101 for (
long id = 0;
id < nprocs; ++id) {
2102 auto idStr = detail::fmt::to_s(
static_cast<uint64_t
>(
id));
2103 auto sysCpu =
"/sys/devices/system/cpu/cpu" + idStr;
2104 auto minFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_min_freq",
nullptr);
2105 auto maxFreq = parseFile<int64_t>(sysCpu +
"/cpufreq/scaling_max_freq",
nullptr);
2106 if (minFreq != maxFreq) {
2107 auto minMHz = d(minFreq) / 1000.0;
2108 auto maxMHz = d(maxFreq) / 1000.0;
2109 warnings.emplace_back(
"CPU frequency scaling enabled: CPU " + idStr +
" between " +
2110 detail::fmt::Number(1, 1, minMHz).to_s() +
" and " + detail::fmt::Number(1, 1, maxMHz).to_s() +
2112 recommendPyPerf =
true;
2118 auto currentGovernor = parseFile<std::string>(
"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", &fail);
2119 if (!fail &&
"performance" != currentGovernor) {
2120 warnings.emplace_back(
"CPU governor is '" + currentGovernor +
"' but should be 'performance'");
2121 recommendPyPerf =
true;
2124 auto noTurbo = parseFile<int>(
"/sys/devices/system/cpu/intel_pstate/no_turbo", &fail);
2125 if (!fail && noTurbo == 0) {
2126 warnings.emplace_back(
"Turbo is enabled, CPU frequency will fluctuate");
2127 recommendPyPerf =
true;
2132 if (recommendCheckFlags) {
2133 recommendations.emplace_back(
"Make sure you compile for Release");
2135 if (recommendPyPerf) {
2136 recommendations.emplace_back(
"Use 'pyperf system tune' before benchmarking. See https://github.com/psf/pyperf");
2140void printStabilityInformationOnce(std::ostream* outStream) {
2141 static bool shouldPrint =
true;
2142 if (shouldPrint && (
nullptr != outStream) && isWarningsEnabled()) {
2143 auto& os = *outStream;
2144 shouldPrint =
false;
2145 std::vector<std::string> warnings;
2146 std::vector<std::string> recommendations;
2147 gatherStabilityInformation(warnings, recommendations);
2148 if (warnings.empty()) {
2152 os <<
"Warning, results might be unstable:" << std::endl;
2153 for (
auto const& w : warnings) {
2154 os <<
"* " << w << std::endl;
2157 os << std::endl <<
"Recommendations" << std::endl;
2158 for (
auto const& r : recommendations) {
2159 os <<
"* " << r << std::endl;
// Function-local singleton: hash of the most recently printed table header.
// Callers compare against it to decide whether the header must be printed
// again (see the header-hash check in showResult()).
uint64_t& singletonHeaderHash() noexcept {
    static uint64_t sHeaderHash{};
    return sHeaderHash;
}
// Boost-style hash combiner: mixes val into seed using the golden-ratio
// constant plus shifted copies of the seed.
inline uint64_t hash_combine(uint64_t seed, uint64_t val) {
    return seed ^ (val + UINT64_C(0x9e3779b9) + (seed << 6U) + (seed >> 2U));
}
2176Clock::duration calcClockResolution(
size_t numEvaluations)
noexcept {
2177 auto bestDuration = Clock::duration::max();
2178 Clock::time_point tBegin;
2179 Clock::time_point tEnd;
2180 for (
size_t i = 0; i < numEvaluations; ++i) {
2181 tBegin = Clock::now();
2183 tEnd = Clock::now();
2184 }
while (tBegin == tEnd);
2185 bestDuration = (std::min)(bestDuration, tEnd - tBegin);
2187 return bestDuration;
2191Clock::duration clockResolution() noexcept {
2192 static Clock::duration
const sResolution = calcClockResolution(20);
2197struct IterationLogic::Impl {
2198 enum class State { warmup, upscaling_runtime, measuring, endless };
2200 explicit Impl(Bench
const& bench)
2202 , mResult(bench.config()) {
2203 printStabilityInformationOnce(mBench.output());
2206 mTargetRuntimePerEpoch = detail::clockResolution() * mBench.clockResolutionMultiple();
2207 if (mTargetRuntimePerEpoch > mBench.maxEpochTime()) {
2208 mTargetRuntimePerEpoch = mBench.maxEpochTime();
2210 if (mTargetRuntimePerEpoch < mBench.minEpochTime()) {
2211 mTargetRuntimePerEpoch = mBench.minEpochTime();
2214 if (isEndlessRunning(mBench.name())) {
2215 std::cerr <<
"NANOBENCH_ENDLESS set: running '" << mBench.name() <<
"' endlessly" << std::endl;
2216 mNumIters = (std::numeric_limits<uint64_t>::max)();
2217 mState = State::endless;
2218 }
else if (0 != mBench.warmup()) {
2219 mNumIters = mBench.warmup();
2220 mState = State::warmup;
2221 }
else if (0 != mBench.epochIterations()) {
2223 mNumIters = mBench.epochIterations();
2224 mState = State::measuring;
2226 mNumIters = mBench.minEpochIterations();
2227 mState = State::upscaling_runtime;
2232 ANKERL_NANOBENCH(NODISCARD) uint64_t calcBestNumIters(std::chrono::nanoseconds elapsed, uint64_t iters)
noexcept {
2233 auto doubleElapsed = d(elapsed);
2234 auto doubleTargetRuntimePerEpoch = d(mTargetRuntimePerEpoch);
2235 auto doubleNewIters = doubleTargetRuntimePerEpoch / doubleElapsed * d(iters);
2237 auto doubleMinEpochIters = d(mBench.minEpochIterations());
2238 if (doubleNewIters < doubleMinEpochIters) {
2239 doubleNewIters = doubleMinEpochIters;
2241 doubleNewIters *= 1.0 + 0.2 * mRng.uniform01();
2245 return static_cast<uint64_t
>(doubleNewIters + 0.5);
2249 if (elapsed * 10 < mTargetRuntimePerEpoch) {
2251 if (mNumIters * 10 < mNumIters) {
2253 showResult(
"iterations overflow. Maybe your code got optimized away?");
2259 mNumIters = calcBestNumIters(elapsed, mNumIters);
2263 void add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc)
noexcept {
2264# if defined(ANKERL_NANOBENCH_LOG_ENABLED)
2265 auto oldIters = mNumIters;
2270 if (isCloseEnoughForMeasurements(elapsed)) {
2273 mState = State::measuring;
2274 mNumIters = calcBestNumIters(elapsed, mNumIters);
2277 mState = State::upscaling_runtime;
2282 case State::upscaling_runtime:
2283 if (isCloseEnoughForMeasurements(elapsed)) {
2285 mState = State::measuring;
2286 mTotalElapsed += elapsed;
2287 mTotalNumIters += mNumIters;
2288 mResult.add(elapsed, mNumIters, pc);
2289 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2295 case State::measuring:
2298 mTotalElapsed += elapsed;
2299 mTotalNumIters += mNumIters;
2300 mResult.add(elapsed, mNumIters, pc);
2301 if (0 != mBench.epochIterations()) {
2302 mNumIters = mBench.epochIterations();
2304 mNumIters = calcBestNumIters(mTotalElapsed, mTotalNumIters);
2308 case State::endless:
2309 mNumIters = (std::numeric_limits<uint64_t>::max)();
2313 if (
static_cast<uint64_t
>(mResult.size()) == mBench.epochs()) {
2319 ANKERL_NANOBENCH_LOG(mBench.name() <<
": " << detail::fmt::Number(20, 3, d(elapsed.count())) <<
" elapsed, "
2320 << detail::fmt::Number(20, 3, d(mTargetRuntimePerEpoch.count())) <<
" target. oldIters="
2321 << oldIters <<
", mNumIters=" << mNumIters <<
", mState=" <<
static_cast<int>(mState));
2325 void showResult(std::string
const& errorMessage)
const {
2328 if (mBench.output() !=
nullptr) {
2330 std::vector<fmt::MarkDownColumn> columns;
2334 if (mBench.relative()) {
2336 if (!mBench.results().empty()) {
2339 columns.emplace_back(11, 1,
"relative",
"%", d);
2342 if (mBench.complexityN() > 0) {
2343 columns.emplace_back(14, 0,
"complexityN",
"", mBench.complexityN());
2346 columns.emplace_back(22, 2, mBench.timeUnitName() +
"/" + mBench.unit(),
"",
2347 rMedian / (mBench.timeUnit().count() * mBench.batch()));
2348 columns.emplace_back(22, 2, mBench.unit() +
"/s",
"", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
2351 columns.emplace_back(10, 1,
"err%",
"%", rErrorMedian * 100.0);
2353 double rInsMedian = -1.0;
2356 columns.emplace_back(18, 2,
"ins/" + mBench.unit(),
"", rInsMedian / mBench.batch());
2359 double rCycMedian = -1.0;
2362 columns.emplace_back(18, 2,
"cyc/" + mBench.unit(),
"", rCycMedian / mBench.batch());
2364 if (rInsMedian > 0.0 && rCycMedian > 0.0) {
2365 columns.emplace_back(9, 3,
"IPC",
"", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
2369 columns.emplace_back(17, 2,
"bra/" + mBench.unit(),
"", rBraMedian / mBench.batch());
2372 if (rBraMedian >= 1e-9) {
2375 columns.emplace_back(10, 1,
"miss%",
"%", p);
2382 auto& os = *mBench.output();
2386 hash = hash_combine(std::hash<std::string>{}(mBench.unit()), hash);
2387 hash = hash_combine(std::hash<std::string>{}(mBench.title()), hash);
2388 hash = hash_combine(std::hash<std::string>{}(mBench.timeUnitName()), hash);
2389 hash = hash_combine(std::hash<double>{}(mBench.timeUnit().
count()), hash);
2390 hash = hash_combine(std::hash<bool>{}(mBench.relative()), hash);
2391 hash = hash_combine(std::hash<bool>{}(mBench.performanceCounters()), hash);
2393 if (hash != singletonHeaderHash()) {
2394 singletonHeaderHash() = hash;
2398 for (
auto const& col : columns) {
2401 os <<
"| " << mBench.title() << std::endl;
2403 for (
auto const& col : columns) {
2404 os << col.separator();
2406 os <<
"|:" << std::string(mBench.title().size() + 1U,
'-') << std::endl;
2409 if (!errorMessage.empty()) {
2410 for (
auto const& col : columns) {
2411 os << col.invalid();
2413 os <<
"| :boom: " << fmt::MarkDownCode(mBench.name()) <<
" (" << errorMessage <<
')' << std::endl;
2415 for (
auto const& col : columns) {
2419 auto showUnstable = isWarningsEnabled() && rErrorMedian >= 0.05;
2421 os <<
":wavy_dash: ";
2423 os << fmt::MarkDownCode(mBench.name());
2425 auto avgIters = d(mTotalNumIters) / d(mBench.epochs());
2427 auto suggestedIters =
static_cast<uint64_t
>(avgIters * 10 + 0.5);
2429 os <<
" (Unstable with ~" << detail::fmt::Number(1, 1, avgIters)
2430 <<
" iters. Increase `minEpochIterations` to e.g. " << suggestedIters <<
")";
2437 ANKERL_NANOBENCH(NODISCARD)
bool isCloseEnoughForMeasurements(std::chrono::nanoseconds elapsed)
const noexcept {
2438 return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
2441 uint64_t mNumIters = 1;
2442 Bench
const& mBench;
2443 std::chrono::nanoseconds mTargetRuntimePerEpoch{};
2446 std::chrono::nanoseconds mTotalElapsed{};
2447 uint64_t mTotalNumIters = 0;
2448 State mState = State::upscaling_runtime;
2452IterationLogic::IterationLogic(Bench
const& bench)
2453 : mPimpl(new Impl(bench)) {}
2455IterationLogic::~IterationLogic() {
2459uint64_t IterationLogic::numIters() const noexcept {
2461 return mPimpl->mNumIters;
2464void IterationLogic::add(std::chrono::nanoseconds elapsed, PerformanceCounters
const& pc)
noexcept {
2465 mPimpl->add(elapsed, pc);
2468void IterationLogic::moveResultTo(std::vector<Result>& results)
noexcept {
2469 results.emplace_back(std::move(mPimpl->mResult));
2472# if ANKERL_NANOBENCH(PERF_COUNTERS)
2475class LinuxPerformanceCounters {
2478 Target(uint64_t* targetValue_,
bool correctMeasuringOverhead_,
bool correctLoopOverhead_)
2479 : targetValue(targetValue_)
2480 , correctMeasuringOverhead(correctMeasuringOverhead_)
2481 , correctLoopOverhead(correctLoopOverhead_) {}
2483 uint64_t* targetValue{};
2484 bool correctMeasuringOverhead{};
2485 bool correctLoopOverhead{};
2488 LinuxPerformanceCounters() =
default;
2489 LinuxPerformanceCounters(LinuxPerformanceCounters
const&) =
delete;
2490 LinuxPerformanceCounters(LinuxPerformanceCounters&&) =
delete;
2491 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters
const&) =
delete;
2492 LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) =
delete;
2493 ~LinuxPerformanceCounters();
2496 inline void start() {}
2498 inline void stop() {}
2500 bool monitor(perf_sw_ids swId, Target target);
2501 bool monitor(perf_hw_id hwId, Target target);
2509 inline void beginMeasure() {
2515 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
2521 mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
2524 inline void endMeasure() {
2530 mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
2535 auto const numBytes =
sizeof(uint64_t) * mCounters.size();
2536 auto ret = read(mFd, mCounters.data(), numBytes);
2537 mHasError =
ret !=
static_cast<ssize_t
>(numBytes);
2540 void updateResults(uint64_t numIters);
2543 template <
typename T>
2544 static inline T divRounded(T a, T divisor) {
2545 return (a + divisor / 2) / divisor;
2549 static inline uint32_t mix(uint32_t x) noexcept {
2556 template <
typename Op>
2558 void calibrate(Op&& op) {
2560 for (
auto& v : mCalibratedOverhead) {
2565 auto newCalibration = mCalibratedOverhead;
2566 for (
auto& v : newCalibration) {
2567 v = (std::numeric_limits<uint64_t>::max)();
2569 for (
size_t iter = 0; iter < 100; ++iter) {
2577 for (
size_t i = 0; i < newCalibration.size(); ++i) {
2578 auto diff = mCounters[i];
2579 if (newCalibration[i] > diff) {
2580 newCalibration[i] = diff;
2585 mCalibratedOverhead = std::move(newCalibration);
2592 uint64_t
const numIters = 100000U + (std::random_device{}() & 3U);
2593 uint64_t n = numIters;
2594 uint32_t x = 1234567;
2602 auto measure1 = mCounters;
2613 auto measure2 = mCounters;
2615 for (
size_t i = 0; i < mCounters.size(); ++i) {
2617 auto m1 = measure1[i] > mCalibratedOverhead[i] ? measure1[i] - mCalibratedOverhead[i] : 0;
2618 auto m2 = measure2[i] > mCalibratedOverhead[i] ? measure2[i] - mCalibratedOverhead[i] : 0;
2619 auto overhead = m1 * 2 > m2 ? m1 * 2 - m2 : 0;
2621 mLoopOverhead[i] = divRounded(overhead, numIters);
2627 bool monitor(uint32_t type, uint64_t eventid, Target target);
2629 std::map<uint64_t, Target> mIdToTarget{};
2632 std::vector<uint64_t> mCounters{3};
2633 std::vector<uint64_t> mCalibratedOverhead{3};
2634 std::vector<uint64_t> mLoopOverhead{3};
2636 uint64_t mTimeEnabledNanos = 0;
2637 uint64_t mTimeRunningNanos = 0;
2639 bool mHasError =
false;
2643LinuxPerformanceCounters::~LinuxPerformanceCounters() {
2649bool LinuxPerformanceCounters::monitor(perf_sw_ids swId, LinuxPerformanceCounters::Target target) {
2650 return monitor(PERF_TYPE_SOFTWARE, swId, target);
2653bool LinuxPerformanceCounters::monitor(perf_hw_id hwId, LinuxPerformanceCounters::Target target) {
2654 return monitor(PERF_TYPE_HARDWARE, hwId, target);
2659void LinuxPerformanceCounters::updateResults(uint64_t numIters) {
2661 for (
auto& id_value : mIdToTarget) {
2662 *id_value.second.targetValue = UINT64_C(0);
2669 mTimeEnabledNanos = mCounters[1] - mCalibratedOverhead[1];
2670 mTimeRunningNanos = mCounters[2] - mCalibratedOverhead[2];
2672 for (uint64_t i = 0; i < mCounters[0]; ++i) {
2673 auto idx =
static_cast<size_t>(3 + i * 2 + 0);
2674 auto id = mCounters[idx + 1U];
2676 auto it = mIdToTarget.find(
id);
2677 if (it != mIdToTarget.end()) {
2679 auto& tgt = it->second;
2680 *tgt.targetValue = mCounters[idx];
2681 if (tgt.correctMeasuringOverhead) {
2682 if (*tgt.targetValue >= mCalibratedOverhead[idx]) {
2683 *tgt.targetValue -= mCalibratedOverhead[idx];
2685 *tgt.targetValue = 0U;
2688 if (tgt.correctLoopOverhead) {
2689 auto correctionVal = mLoopOverhead[idx] * numIters;
2690 if (*tgt.targetValue >= correctionVal) {
2691 *tgt.targetValue -= correctionVal;
2693 *tgt.targetValue = 0U;
2700bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target target) {
2701 *target.targetValue = (std::numeric_limits<uint64_t>::max)();
2706 auto pea = perf_event_attr();
2707 std::memset(&pea, 0,
sizeof(perf_event_attr));
2709 pea.size =
sizeof(perf_event_attr);
2710 pea.config = eventid;
2712 pea.exclude_kernel = 1;
2716 pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
2720# if defined(PERF_FLAG_FD_CLOEXEC)
2721 const unsigned long flags = PERF_FLAG_FD_CLOEXEC;
2723 const unsigned long flags = 0;
2727 auto fd =
static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd,
flags));
2737 if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &
id)) {
2743 mIdToTarget.emplace(
id, target);
2746 auto size = 3 + 2 * mIdToTarget.size();
2747 mCounters.resize(size);
2748 mCalibratedOverhead.resize(size);
2749 mLoopOverhead.resize(size);
2754PerformanceCounters::PerformanceCounters()
2755 : mPc(new LinuxPerformanceCounters())
2760 mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_REF_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles,
true,
false));
2761 if (!mHas.cpuCycles) {
2763 mHas.cpuCycles = mPc->monitor(PERF_COUNT_HW_CPU_CYCLES, LinuxPerformanceCounters::Target(&mVal.cpuCycles,
true,
false));
2765 mHas.instructions = mPc->monitor(PERF_COUNT_HW_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.instructions,
true,
true));
2766 mHas.branchInstructions =
2767 mPc->monitor(PERF_COUNT_HW_BRANCH_INSTRUCTIONS, LinuxPerformanceCounters::Target(&mVal.branchInstructions,
true,
false));
2768 mHas.branchMisses = mPc->monitor(PERF_COUNT_HW_BRANCH_MISSES, LinuxPerformanceCounters::Target(&mVal.branchMisses,
true,
false));
2772 mHas.pageFaults = mPc->monitor(PERF_COUNT_SW_PAGE_FAULTS, LinuxPerformanceCounters::Target(&mVal.pageFaults,
true,
false));
2773 mHas.contextSwitches =
2774 mPc->monitor(PERF_COUNT_SW_CONTEXT_SWITCHES, LinuxPerformanceCounters::Target(&mVal.contextSwitches,
true,
false));
2778 auto before = ankerl::nanobench::Clock::now();
2779 auto after = ankerl::nanobench::Clock::now();
2784 if (mPc->hasError()) {
2786 mHas = PerfCountSet<bool>{};
2790PerformanceCounters::~PerformanceCounters() {
2795void PerformanceCounters::beginMeasure() {
2796 mPc->beginMeasure();
2799void PerformanceCounters::endMeasure() {
2803void PerformanceCounters::updateResults(uint64_t numIters) {
2804 mPc->updateResults(numIters);
2809PerformanceCounters::PerformanceCounters() =
default;
2810PerformanceCounters::~PerformanceCounters() =
default;
2811void PerformanceCounters::beginMeasure() {}
2812void PerformanceCounters::endMeasure() {}
2813void PerformanceCounters::updateResults(uint64_t) {}
2817ANKERL_NANOBENCH(NODISCARD) PerfCountSet<uint64_t>
const& PerformanceCounters::val() const noexcept {
2820ANKERL_NANOBENCH(NODISCARD) PerfCountSet<bool>
const& PerformanceCounters::has() const noexcept {
2828NumSep::NumSep(
char sep)
2831char NumSep::do_thousands_sep()
const {
2835std::string NumSep::do_grouping()
const {
2840StreamStateRestorer::StreamStateRestorer(std::ostream&
s)
2842 , mLocale(
s.getloc())
2843 , mPrecision(
s.precision())
2846 , mFmtFlags(
s.
flags()) {}
2848StreamStateRestorer::~StreamStateRestorer() {
2853void StreamStateRestorer::restore() {
2854 mStream.imbue(mLocale);
2855 mStream.precision(mPrecision);
2856 mStream.width(mWidth);
2857 mStream.fill(mFill);
2858 mStream.flags(mFmtFlags);
2861Number::Number(
int width,
int precision, int64_t value)
2863 , mPrecision(precision)
2864 , mValue(d(value)) {}
2866Number::Number(
int width,
int precision,
double value)
2868 , mPrecision(precision)
2871std::ostream& Number::write(std::ostream& os)
const {
2872 StreamStateRestorer
const restorer(os);
2873 os.imbue(std::locale(os.getloc(),
new NumSep(
',')));
2874 os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
2878std::string Number::to_s()
const {
2879 std::stringstream ss;
2884std::string to_s(uint64_t n) {
2887 str +=
static_cast<char>(
'0' +
static_cast<char>(n % 10));
2890 std::reverse(str.begin(), str.end());
2894std::ostream&
operator<<(std::ostream& os, Number
const& n) {
2898MarkDownColumn::MarkDownColumn(
int w,
int prec, std::string tit, std::string suff,
double val) noexcept
2901 , mTitle(std::move(tit))
2902 , mSuffix(std::move(suff))
2905std::string MarkDownColumn::title()
const {
2906 std::stringstream ss;
2907 ss <<
'|' << std::setw(mWidth - 2) << std::right << mTitle <<
' ';
2911std::string MarkDownColumn::separator()
const {
2912 std::string sep(
static_cast<size_t>(mWidth),
'-');
2918std::string MarkDownColumn::invalid()
const {
2919 std::string sep(
static_cast<size_t>(mWidth),
' ');
2921 sep[sep.size() - 2] =
'-';
2925std::string MarkDownColumn::value()
const {
2926 std::stringstream ss;
2927 auto width = mWidth - 2 -
static_cast<int>(mSuffix.size());
2928 ss <<
'|' << Number(width, mPrecision, mValue) << mSuffix <<
' ';
2933MarkDownCode::MarkDownCode(std::string
const& what) {
2934 mWhat.reserve(what.size() + 2);
2935 mWhat.push_back(
'`');
2936 for (
char const c : what) {
2939 mWhat.push_back(
'`');
2942 mWhat.push_back(
'`');
2945std::ostream& MarkDownCode::write(std::ostream& os)
const {
2949std::ostream&
operator<<(std::ostream& os, MarkDownCode
const& mdCode) {
2950 return mdCode.write(os);
2956Config::Config() =
default;
2957Config::~Config() =
default;
2958Config& Config::operator=(Config
const&) =
default;
2959Config& Config::operator=(Config&&) noexcept(
ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default;
2960Config::Config(Config const&) = default;
2961Config::Config(Config&&) noexcept = default;
2971template <
typename T>
2972inline constexpr typename std::underlying_type<T>::type u(T val)
noexcept {
2973 return static_cast<typename std::underlying_type<T>::type
>(val);
2979 : mConfig(
std::move(benchmarkConfig))
2980 , mNameToMeasurements{
detail::u(
Result::Measure::_size)} {}
2982void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters
const& pc) {
2986 double const dIters = d(iters);
2987 mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
2989 mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
2990 if (pc.has().pageFaults) {
2991 mNameToMeasurements[u(Result::Measure::pagefaults)].push_back(d(pc.val().pageFaults) / dIters);
2993 if (pc.has().cpuCycles) {
2994 mNameToMeasurements[u(Result::Measure::cpucycles)].push_back(d(pc.val().cpuCycles) / dIters);
2996 if (pc.has().contextSwitches) {
2997 mNameToMeasurements[u(Result::Measure::contextswitches)].push_back(d(pc.val().contextSwitches) / dIters);
2999 if (pc.has().instructions) {
3000 mNameToMeasurements[u(Result::Measure::instructions)].push_back(d(pc.val().instructions) / dIters);
3002 if (pc.has().branchInstructions) {
3003 double branchInstructions = 0.0;
3005 if (pc.val().branchInstructions > iters + 1U) {
3006 branchInstructions = d(pc.val().branchInstructions - (iters + 1U));
3008 mNameToMeasurements[u(Result::Measure::branchinstructions)].push_back(branchInstructions / dIters);
3010 if (pc.has().branchMisses) {
3012 double branchMisses = d(pc.val().branchMisses);
3013 if (branchMisses > branchInstructions) {
3015 branchMisses = branchInstructions;
3019 branchMisses -= 1.0;
3020 if (branchMisses < 1.0) {
3023 mNameToMeasurements[u(Result::Measure::branchmisses)].push_back(branchMisses / dIters);
3028Config
const& Result::config() const noexcept {
3032inline double calcMedian(std::vector<double>&
data) {
3036 std::sort(
data.begin(),
data.end());
3038 auto midIdx =
data.size() / 2U;
3039 if (1U == (
data.size() & 1U)) {
3040 return data[midIdx];
3042 return (
data[midIdx - 1U] +
data[midIdx]) / 2U;
3045double Result::median(Measure m)
const {
3047 auto data = mNameToMeasurements[detail::u(m)];
3048 return calcMedian(
data);
3051double Result::average(Measure m)
const {
3053 auto const&
data = mNameToMeasurements[detail::u(m)];
3059 return sum(m) / d(
data.size());
3062double Result::medianAbsolutePercentError(Measure m)
const {
3064 auto data = mNameToMeasurements[detail::u(m)];
3068 auto med = calcMedian(
data);
3071 for (
auto& x :
data) {
3077 return calcMedian(
data);
3081 auto const&
data = mNameToMeasurements[detail::u(m)];
3082 return std::accumulate(
data.begin(),
data.end(), 0.0);
3085double Result::sumProduct(Measure m1, Measure m2)
const noexcept {
3086 auto const& data1 = mNameToMeasurements[detail::u(m1)];
3087 auto const& data2 = mNameToMeasurements[detail::u(m2)];
3089 if (data1.size() != data2.size()) {
3093 double result = 0.0;
3094 for (
size_t i = 0,
s = data1.size(); i !=
s; ++i) {
3095 result += data1[i] * data2[i];
3100bool Result::has(Measure m)
const noexcept {
3101 return !mNameToMeasurements[detail::u(m)].empty();
3104double Result::get(
size_t idx, Measure m)
const {
3105 auto const&
data = mNameToMeasurements[detail::u(m)];
3106 return data.at(idx);
3109bool Result::empty() const noexcept {
3110 return 0U == size();
3113size_t Result::size() const noexcept {
3114 auto const&
data = mNameToMeasurements[detail::u(Measure::elapsed)];
3118double Result::minimum(Measure m)
const noexcept {
3119 auto const&
data = mNameToMeasurements[detail::u(m)];
3125 return *std::min_element(
data.begin(),
data.end());
3128double Result::maximum(Measure m)
const noexcept {
3129 auto const&
data = mNameToMeasurements[detail::u(m)];
3135 return *std::max_element(
data.begin(),
data.end());
3138std::string
const& Result::context(
char const* variableName)
const {
3139 return mConfig.mContext.at(variableName);
3142std::string
const& Result::context(std::string
const& variableName)
const {
3143 return mConfig.mContext.at(variableName);
3146Result::Measure Result::fromString(std::string
const& str) {
3147 if (str ==
"elapsed") {
3148 return Measure::elapsed;
3150 if (str ==
"iterations") {
3151 return Measure::iterations;
3153 if (str ==
"pagefaults") {
3154 return Measure::pagefaults;
3156 if (str ==
"cpucycles") {
3157 return Measure::cpucycles;
3159 if (str ==
"contextswitches") {
3160 return Measure::contextswitches;
3162 if (str ==
"instructions") {
3163 return Measure::instructions;
3165 if (str ==
"branchinstructions") {
3166 return Measure::branchinstructions;
3168 if (str ==
"branchmisses") {
3169 return Measure::branchmisses;
3172 return Measure::_size;
3177 mConfig.mOut = &std::cout;
3180Bench::Bench(Bench&&) noexcept = default;
3181Bench& Bench::operator=(Bench&&) noexcept(
ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE)) = default;
3182Bench::Bench(Bench const&) = default;
3183Bench& Bench::operator=(Bench const&) = default;
3184Bench::~Bench() noexcept = default;
3186double Bench::batch() const noexcept {
3187 return mConfig.mBatch;
3190double Bench::complexityN() const noexcept {
3191 return mConfig.mComplexityN;
3196Bench& Bench::relative(
bool isRelativeEnabled)
noexcept {
3197 mConfig.mIsRelative = isRelativeEnabled;
3200bool Bench::relative() const noexcept {
3201 return mConfig.mIsRelative;
3205 mConfig.mShowPerformanceCounters = showPerformanceCounters;
3209 return mConfig.mShowPerformanceCounters;
3215Bench& Bench::unit(
char const* u) {
3216 if (u != mConfig.mUnit) {
3223Bench& Bench::unit(std::string
const& u) {
3224 return unit(u.c_str());
3227std::string
const& Bench::unit() const noexcept {
3228 return mConfig.mUnit;
3231Bench& Bench::timeUnit(std::chrono::duration<double>
const& tu, std::string
const& tuName) {
3232 mConfig.mTimeUnit = tu;
3233 mConfig.mTimeUnitName = tuName;
3237std::string
const& Bench::timeUnitName() const noexcept {
3238 return mConfig.mTimeUnitName;
3241std::chrono::duration<double>
const& Bench::timeUnit() const noexcept {
3242 return mConfig.mTimeUnit;
3246Bench& Bench::title(
const char* benchmarkTitle) {
3247 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3250 mConfig.mBenchmarkTitle = benchmarkTitle;
3253Bench& Bench::title(std::string
const& benchmarkTitle) {
3254 if (benchmarkTitle != mConfig.mBenchmarkTitle) {
3257 mConfig.mBenchmarkTitle = benchmarkTitle;
3261std::string
const& Bench::title() const noexcept {
3262 return mConfig.mBenchmarkTitle;
3266 mConfig.mBenchmarkName = benchmarkName;
3270Bench&
Bench::name(std::string
const& benchmarkName) {
3271 mConfig.mBenchmarkName = benchmarkName;
3276 return mConfig.mBenchmarkName;
3279Bench& Bench::context(
char const* variableName,
char const* variableValue) {
3280 mConfig.mContext[variableName] = variableValue;
3284Bench& Bench::context(std::string
const& variableName, std::string
const& variableValue) {
3285 mConfig.mContext[variableName] = variableValue;
3289Bench& Bench::clearContext() {
3290 mConfig.mContext.clear();
3295Bench& Bench::epochs(
size_t numEpochs)
noexcept {
3296 mConfig.mNumEpochs = numEpochs;
3299size_t Bench::epochs() const noexcept {
3300 return mConfig.mNumEpochs;
3304Bench& Bench::clockResolutionMultiple(
size_t multiple)
noexcept {
3305 mConfig.mClockResolutionMultiple = multiple;
3308size_t Bench::clockResolutionMultiple() const noexcept {
3309 return mConfig.mClockResolutionMultiple;
3313Bench& Bench::maxEpochTime(std::chrono::nanoseconds t)
noexcept {
3314 mConfig.mMaxEpochTime =
t;
3317std::chrono::nanoseconds Bench::maxEpochTime() const noexcept {
3318 return mConfig.mMaxEpochTime;
3322Bench& Bench::minEpochTime(std::chrono::nanoseconds t)
noexcept {
3323 mConfig.mMinEpochTime =
t;
3326std::chrono::nanoseconds Bench::minEpochTime() const noexcept {
3327 return mConfig.mMinEpochTime;
3330Bench& Bench::minEpochIterations(uint64_t numIters)
noexcept {
3331 mConfig.mMinEpochIterations = (numIters == 0) ? 1 : numIters;
3334uint64_t Bench::minEpochIterations() const noexcept {
3335 return mConfig.mMinEpochIterations;
3338Bench& Bench::epochIterations(uint64_t numIters)
noexcept {
3339 mConfig.mEpochIterations = numIters;
3342uint64_t Bench::epochIterations() const noexcept {
3343 return mConfig.mEpochIterations;
3346Bench& Bench::warmup(uint64_t numWarmupIters)
noexcept {
3347 mConfig.mWarmup = numWarmupIters;
3350uint64_t Bench::warmup() const noexcept {
3351 return mConfig.mWarmup;
3354Bench& Bench::config(Config
const& benchmarkConfig) {
3355 mConfig = benchmarkConfig;
3358Config
const& Bench::config() const noexcept {
3362Bench& Bench::output(std::ostream* outstream)
noexcept {
3363 mConfig.mOut = outstream;
3368 return mConfig.mOut;
3371std::vector<Result>
const& Bench::results() const noexcept {
3375Bench&
Bench::render(
char const* templateContent, std::ostream& os) {
3380Bench&
Bench::render(std::string
const& templateContent, std::ostream& os) {
3385std::vector<BigO> Bench::complexityBigO()
const {
3386 std::vector<BigO> bigOs;
3387 auto rangeMeasure = BigO::collectRangeMeasure(mResults);
3388 bigOs.emplace_back(
"O(1)", rangeMeasure, [](
double) {
3391 bigOs.emplace_back(
"O(n)", rangeMeasure, [](
double n) {
3394 bigOs.emplace_back(
"O(log n)", rangeMeasure, [](
double n) {
3395 return std::log2(n);
3397 bigOs.emplace_back(
"O(n log n)", rangeMeasure, [](
double n) {
3398 return n * std::log2(n);
3400 bigOs.emplace_back(
"O(n^2)", rangeMeasure, [](
double n) {
3403 bigOs.emplace_back(
"O(n^3)", rangeMeasure, [](
double n) {
3406 std::sort(bigOs.begin(), bigOs.end());
3413 std::random_device rd;
3414 std::uniform_int_distribution<uint64_t> dist;
3418 }
while (mX == 0 && mY == 0);
3422uint64_t splitMix64(uint64_t& state) noexcept {
3423 uint64_t z = (state += UINT64_C(0x9e3779b97f4a7c15));
3424 z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
3425 z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
3426 return z ^ (z >> 31U);
3430Rng::Rng(uint64_t seed) noexcept
3431 : mX(splitMix64(seed))
3432 , mY(splitMix64(seed)) {
3433 for (
size_t i = 0; i < 10; ++i) {
3439Rng::Rng(uint64_t x, uint64_t y) noexcept
3443Rng Rng::copy() const noexcept {
3447Rng::Rng(std::vector<uint64_t>
const&
data)
3450 if (
data.size() != 2) {
3451 throw std::runtime_error(
"ankerl::nanobench::Rng::Rng: needed exactly 2 entries in data, but got " +
3452 detail::fmt::to_s(
data.size()));
3458std::vector<uint64_t> Rng::state()
const {
3459 std::vector<uint64_t>
data(2);
3465BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result>
const& results) {
3466 BigO::RangeMeasure rangeMeasure;
3467 for (
auto const& result : results) {
3468 if (result.config().mComplexityN > 0.0) {
3469 rangeMeasure.emplace_back(result.config().mComplexityN, result.median(Result::Measure::elapsed));
3472 return rangeMeasure;
3475BigO::BigO(std::string bigOName, RangeMeasure
const& rangeMeasure)
3476 : mName(
std::move(bigOName)) {
3479 double sumRangeMeasure = 0.0;
3480 double sumRangeRange = 0.0;
3482 for (
const auto& rm : rangeMeasure) {
3483 sumRangeMeasure += rm.first * rm.second;
3484 sumRangeRange += rm.first * rm.first;
3486 mConstant = sumRangeMeasure / sumRangeRange;
3490 double sumMeasure = 0.0;
3491 for (
const auto& rm : rangeMeasure) {
3492 auto diff = mConstant * rm.first - rm.second;
3495 sumMeasure += rm.second;
3498 auto n = detail::d(rangeMeasure.size());
3499 auto mean = sumMeasure / n;
3500 mNormalizedRootMeanSquare = std::sqrt(err / n) / mean;
3503BigO::BigO(
const char* bigOName, RangeMeasure
const& rangeMeasure)
3504 : BigO(
std::string(bigOName), rangeMeasure) {}
3506std::string
const&
BigO::name() const noexcept {
3510double BigO::constant() const noexcept {
3514double BigO::normalizedRootMeanSquare() const noexcept {
3515 return mNormalizedRootMeanSquare;
3519 return std::tie(mNormalizedRootMeanSquare, mName) < std::tie(other.mNormalizedRootMeanSquare, other.mName);
3522std::ostream&
operator<<(std::ostream& os, BigO
const& bigO) {
3523 return os << bigO.constant() <<
" * " << bigO.name() <<
", rms=" << bigO.normalizedRootMeanSquare();
3526std::ostream&
operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO>
const& bigOs) {
3527 detail::fmt::StreamStateRestorer
const restorer(os);
3528 os << std::endl <<
"| coefficient | err% | complexity" << std::endl <<
"|--------------:|-------:|------------" << std::endl;
3529 for (
auto const& bigO : bigOs) {
3530 os <<
"|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() <<
" ";
3531 os <<
"|" << detail::fmt::Number(6, 1, bigO.normalizedRootMeanSquare() * 100.0) <<
"% ";
3532 os <<
"| " << bigO.name();
Main entry point to nanobench's benchmarking facility.
Bench & operator=(Bench const &other)
ANKERL_NANOBENCH(NODISCARD) std Bench & doNotOptimizeAway(Arg &&arg)
Retrieves all benchmark results collected by the bench object so far.
Bench & run(char const *benchmarkName, Op &&op)
Repeatedly calls op() based on the configuration, and performs measurements.
Bench & batch(T b) noexcept
Sets the batch size.
std::vector< BigO > complexityBigO() const
Bench()
Creates a new benchmark for configuration and running of benchmarks.
Bench & operator=(Bench &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
detail::SetupRunner< SetupOp > setup(SetupOp setupOp)
Configure an untimed setup step per epoch (forces single-iteration epochs).
Bench(Bench &&other) noexcept
Bench(Bench const &other)
Bench & complexityN(T n) noexcept
static RangeMeasure mapRangeMeasure(RangeMeasure data, Op op)
BigO(std::string bigOName, RangeMeasure const &scaledRangeMeasure)
std::vector< std::pair< double, double > > RangeMeasure
BigO(char const *bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
static RangeMeasure collectRangeMeasure(std::vector< Result > const &results)
BigO(std::string bigOName, RangeMeasure const &rangeMeasure, Op rangeToN)
BigO(char const *bigOName, RangeMeasure const &scaledRangeMeasure)
Result(Config benchmarkConfig)
static Measure fromString(std::string const &str)
void add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const &pc)
Result(Result &&other) noexcept
ANKERL_NANOBENCH(NODISCARD) Config const &config() const noexcept
Result & operator=(Result const &other)
Result(Result const &other)
Result & operator=(Result &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
An extremely fast random generator.
static constexpr uint64_t() min()
Rng(Rng const &)=delete
As a safety precaution, we don't allow copying.
void shuffle(Container &container) noexcept
Shuffles all entries in the given container.
Rng(Rng &&) noexcept=default
Rng & operator=(Rng const &)=delete
Same as Rng(Rng const&), we don't allow assignment.
static constexpr uint64_t() max()
double uniform01() noexcept
Provides a random uniform double value between 0 and 1.
uint64_t result_type
This RNG provides 64bit randomness.
void moveResultTo(std::vector< Result > &results) noexcept
void add(std::chrono::nanoseconds elapsed, PerformanceCounters const &pc) noexcept
IterationLogic(IterationLogic &&)=delete
IterationLogic & operator=(IterationLogic const &)=delete
ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept
IterationLogic(IterationLogic const &)=delete
IterationLogic(Bench const &bench)
IterationLogic & operator=(IterationLogic &&)=delete
SetupRunner(SetupOp setupOp, Bench &bench)
#define T(expected, seed, data)
void doNotOptimizeAway(T &val)
PerformanceCounters & performanceCounters()
void doNotOptimizeAway(T const &val)
char const * json() noexcept
Template to generate JSON data.
char const * csv() noexcept
CSV data for the benchmark results.
char const * pyperf() noexcept
Output in pyperf compatible JSON format, which can be used for more analyzation.
char const * htmlBoxplot() noexcept
HTML output that uses plotly to generate an interactive boxplot chart. See the tutorial for an exampl...
void render(char const *mustacheTemplate, Bench const &bench, std::ostream &out)
Renders output from a mustache-like template and benchmark results.
std::conditional< std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock >::type Clock
void render(std::string const &mustacheTemplate, std::vector< Result > const &results, std::ostream &out)
std::ostream & operator<<(std::ostream &os, BigO const &bigO)
std::ostream & operator<<(std::ostream &os, std::vector< ankerl::nanobench::BigO > const &bigOs)
void doNotOptimizeAway(Arg &&arg)
Makes sure none of the given arguments are optimized away by the compiler.
#define ANKERL_NANOBENCH_LOG(x)
#define ANKERL_NANOBENCH_NO_SANITIZE(...)
#define ANKERL_NANOBENCH(x)
bool operator==(const CNetAddr &a, const CNetAddr &b)
bool operator<(const CNetAddr &a, const CNetAddr &b)
Config & operator=(Config const &other)
Config(Config const &other)
Config & operator=(Config &&other) noexcept(ANKERL_NANOBENCH(NOEXCEPT_STRING_MOVE))
Config(Config &&other) noexcept
static SECP256K1_INLINE uint64_t rotl(const uint64_t x, int k)