aboutsummaryrefslogtreecommitdiff
path: root/test/native/hash.cc
diff options
context:
space:
mode:
authorJohannes Stoelp <johannes.stoelp@gmail.com>2022-01-14 23:51:05 +0100
committerJohannes Stoelp <johannes.stoelp@gmail.com>2022-01-14 23:51:05 +0100
commitf9928a1a08c57fe853888119a996c3acc98ee09d (patch)
treec9770b76ffcc281da141f3aa2c595600372c0fca /test/native/hash.cc
downloadpio-nodemcuv2-dhcp-server-main.tar.gz
pio-nodemcuv2-dhcp-server-main.zip
Initial version of nodemcuv2 dhcp server (HEAD, main)
Able to offer IP address + DNS/Gateway ... Worked with devices at my hand.
Diffstat (limited to 'test/native/hash.cc')
-rw-r--r--test/native/hash.cc71
1 files changed, 71 insertions, 0 deletions
diff --git a/test/native/hash.cc b/test/native/hash.cc
new file mode 100644
index 0000000..8aa0a35
--- /dev/null
+++ b/test/native/hash.cc
@@ -0,0 +1,71 @@
+// Copyright (c) 2022 Johannes Stoelp
+
#include <types.h>
#include <utils.h>

#include <array>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <filesystem>
#include <fstream>
#include <gtest/gtest.h>
#include <iostream>
#include <unordered_map>
#include <vector>
+
// A 6-byte identifier used as hash input (presumably a MAC address, given
// the DHCP-server context -- confirm against the server sources).
using ID = std::array<u8, 6>;
// Shorthand for the standard filesystem namespace.
namespace fs = std::filesystem;
+
+std::vector<ID> read_blob() {
+ if (!fs::exists("blob")) {
+ std::system("dd if=/dev/urandom of=blob count=16 bs=1M");
+ }
+
+ std::vector<ID> ids;
+ ids.reserve(fs::file_size("blob") / sizeof(ID));
+
+ auto ifs = std::ifstream("blob");
+ assert(ifs.is_open());
+ ID id;
+ while (ifs.read((char*)id.data(), sizeof(id))) {
+ ids.push_back(id);
+ }
+
+ return ids;
+}
+
+TEST(hash, uniform_distribuation) {
+ constexpr usize BUCKETS = 64;
+ constexpr float BUCKET_SIZE = static_cast<float>(100) / BUCKETS; // Bucket size in percent.
+ constexpr float BUCKET_ERR = BUCKET_SIZE * 0.05 /* 5% */; // Allowed distribution error.
+
+ const auto ids = read_blob();
+
+ usize cnt[BUCKETS] = {0};
+ for (const auto& id : ids) {
+ u32 h = hash(id.data(), id.size());
+ cnt[h % BUCKETS] += 1;
+ }
+
+ for (usize b = 0; b < BUCKETS; ++b) {
+ const float dist = static_cast<float>(cnt[b]) / ids.size() * 100;
+ ASSERT_GT(dist, BUCKET_SIZE - BUCKET_ERR);
+ ASSERT_LT(dist, BUCKET_SIZE + BUCKET_ERR);
+ // printf("bucket %2ld: %5.2f (%ld)\n", b, dist, cnt[b]);
+ }
+}
+
+TEST(hash, DISABLED_collisions) {
+ const auto ids = read_blob();
+
+ std::unordered_map<u32, usize> hits;
+ for (const auto& id : ids) {
+ u32 h = hash(id.data(), id.size());
+ hits[h] = hits[h] + 1;
+ }
+
+ usize collisions = 0;
+ for (const auto& hit : hits) {
+ if (hit.second > 1) {
+ ++collisions;
+ }
+ }
+
+ printf("Hashed %ld values got %ld collisions\n", ids.size(), collisions);
+}