diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 97e56af8442..8911aa3dc82 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -24,6 +24,7 @@
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
 #include "absl/base/optimization.h"
 #include "absl/container/internal/container_memory.h"
 #include "absl/container/internal/hashtablez_sampler.h"
@@ -661,6 +662,10 @@ size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
   return target.offset;
 }
 
+void HashTableSizeOverflow() {
+  ABSL_RAW_LOG(FATAL, "Hash table size overflow");
+}
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 00c60a23d3b..2a9c5390ccb 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1226,6 +1226,9 @@ class RawHashSetLayout {
   // Given the capacity of a table, computes the total size of the backing
   // array.
   size_t alloc_size(size_t slot_size) const {
+    ABSL_SWISSTABLE_ASSERT(
+        slot_size <=
+        ((std::numeric_limits<size_t>::max)() - slot_offset_) / capacity_);
     return slot_offset_ + capacity_ * slot_size;
   }
 
@@ -1544,6 +1547,15 @@ inline size_t NormalizeCapacity(size_t n) {
   return n ? ~size_t{} >> countl_zero(n) : 1;
 }
 
+template <size_t kSlotSize>
+size_t MaxValidCapacity() {
+  return NormalizeCapacity((std::numeric_limits<size_t>::max)() / 4 /
+                           kSlotSize);
+}
+
+// Use a non-inlined function to avoid code bloat.
+[[noreturn]] void HashTableSizeOverflow();
+
 // General notes on capacity/growth methods below:
 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
 //   average of two empty slots per group.
@@ -2645,6 +2657,10 @@ class raw_hash_set {
       : settings_(CommonFields::CreateDefault<SooEnabled()>(), hash, eq,
                   alloc) {
     if (bucket_count > DefaultCapacity()) {
+      if (ABSL_PREDICT_FALSE(bucket_count >
+                             MaxValidCapacity<sizeof(slot_type)>())) {
+        HashTableSizeOverflow();
+      }
       resize(NormalizeCapacity(bucket_count));
     }
   }
@@ -2917,7 +2933,9 @@ class raw_hash_set {
     ABSL_ASSUME(cap >= kDefaultCapacity);
     return cap;
   }
-  size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+  size_t max_size() const {
+    return CapacityToGrowth(MaxValidCapacity<sizeof(slot_type)>());
+  }
 
   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
     if (SwisstableGenerationsEnabled() &&
@@ -3376,6 +3394,9 @@ class raw_hash_set {
     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
     // n == 0 unconditionally rehashes as per the standard.
     if (n == 0 || m > cap) {
+      if (ABSL_PREDICT_FALSE(m > MaxValidCapacity<sizeof(slot_type)>())) {
+        HashTableSizeOverflow();
+      }
       resize(m);
 
       // This is after resize, to ensure that we have completed the allocation
@@ -3388,6 +3409,9 @@ class raw_hash_set {
     const size_t max_size_before_growth =
         is_soo() ? SooCapacity() : size() + growth_left();
     if (n > max_size_before_growth) {
+      if (ABSL_PREDICT_FALSE(n > max_size())) {
+        HashTableSizeOverflow();
+      }
       size_t m = GrowthToLowerboundCapacity(n);
       resize(NormalizeCapacity(m));
 
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 833a385ef88..8a17c018cc4 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -3737,6 +3737,14 @@ TEST(Table, MovedFromCallsFail) {
   }
 }
 