Skip to content

Commit ca6637d

Browse files
committed
Add a cache to reduce the likelihood of talking to the backend.
1 parent f017064 commit ca6637d

File tree

2 files changed: +527 additions, -8 deletions

src/snmalloc/mem/corealloc.h

Lines changed: 110 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
#include "../ds/ds.h"
44
#include "check_init.h"
55
#include "freelist.h"
6+
#include "largecache.h"
67
#include "metadata.h"
78
#include "pool.h"
89
#include "remotecache.h"
@@ -181,6 +182,13 @@ namespace snmalloc
181182
*/
182183
Ticker<typename Config::Pal> ticker;
183184

185+
/**
186+
* Cache for large object allocations.
187+
* Avoids pagemap manipulation and backend buddy tree operations
188+
* for recently freed large allocations.
189+
*/
190+
LargeObjectCache<Config> large_object_cache;
191+
184192
/**
185193
* The message queue needs to be accessible from other threads
186194
*
@@ -695,14 +703,79 @@ namespace snmalloc
695703
return Conts::success(result, size, true);
696704
}
697705

706+
auto chunk_size = large_size_to_chunk_size(size);
707+
auto sizeclass = size_to_sizeclass_full(size);
708+
709+
// Check the frontend large object cache first.
710+
// This avoids all pagemap and backend manipulation.
711+
auto* cached_meta = self->large_object_cache.try_alloc(
712+
chunk_size, [self](BackendSlabMetadata* fmeta) {
713+
self->flush_large_cache_entry(fmeta);
714+
});
715+
if (cached_meta != nullptr)
716+
{
717+
// Cache hit: pagemap still valid, recover address from meta.
718+
auto slab_addr =
719+
cached_meta->get_slab_interior(freelist::Object::key_root);
720+
cached_meta->initialise_large(
721+
slab_addr, freelist::Object::key_root);
722+
self->laden.insert(cached_meta);
723+
724+
// Reconstruct the capptr from the address.
725+
auto p = Config::Backend::capptr_rederive_alloc(
726+
capptr::Alloc<void>::unsafe_from(
727+
reinterpret_cast<void*>(slab_addr)),
728+
chunk_size);
729+
return Conts::success(capptr_reveal(p), size);
730+
}
731+
732+
// Cache miss: go to backend.
698733
// Grab slab of correct size
699734
// Set remote as large allocator remote.
700735
auto [chunk, meta] = Config::Backend::alloc_chunk(
701736
self->get_backend_local_state(),
702-
large_size_to_chunk_size(size),
703-
PagemapEntry::encode(
704-
self->public_state(), size_to_sizeclass_full(size)),
705-
size_to_sizeclass_full(size));
737+
chunk_size,
738+
PagemapEntry::encode(self->public_state(), sizeclass),
739+
sizeclass);
740+
741+
// If backend OOM, try staged cache flush and retry.
742+
// First flush smaller sizes — they coalesce upward in the
743+
// buddy. If that's not enough, flush one larger entry —
744+
// the buddy can split it.
745+
if (meta == nullptr)
746+
{
747+
auto flush_fn = [self](BackendSlabMetadata* fmeta) {
748+
self->flush_large_cache_entry(fmeta);
749+
};
750+
751+
// Stage 1: flush all smaller sizeclasses.
752+
if (self->large_object_cache.flush_smaller(
753+
chunk_size, flush_fn))
754+
{
755+
auto retry = Config::Backend::alloc_chunk(
756+
self->get_backend_local_state(),
757+
chunk_size,
758+
PagemapEntry::encode(self->public_state(), sizeclass),
759+
sizeclass);
760+
chunk = retry.first;
761+
meta = retry.second;
762+
}
763+
764+
// Stage 2: flush a single larger-or-equal entry.
765+
if (
766+
meta == nullptr &&
767+
self->large_object_cache.flush_one_larger(
768+
chunk_size, flush_fn))
769+
{
770+
auto retry = Config::Backend::alloc_chunk(
771+
self->get_backend_local_state(),
772+
chunk_size,
773+
PagemapEntry::encode(self->public_state(), sizeclass),
774+
sizeclass);
775+
chunk = retry.first;
776+
meta = retry.second;
777+
}
778+
}
706779

707780
#ifdef SNMALLOC_TRACING
708781
message<1024>(
@@ -1086,6 +1159,7 @@ namespace snmalloc
10861159
const PagemapEntry& entry,
10871160
BackendSlabMetadata* meta) noexcept
10881161
{
1162+
UNUSED(p);
10891163
// TODO: Handle message queue on this path?
10901164

10911165
if (meta->is_large())
@@ -1100,15 +1174,21 @@ namespace snmalloc
11001174

11011175
#ifdef SNMALLOC_TRACING
11021176
message<1024>("Large deallocation: {}", size);
1103-
#else
1104-
UNUSED(size);
11051177
#endif
11061178

11071179
// Remove from set of fully used slabs.
11081180
meta->node.remove();
11091181

1110-
Config::Backend::dealloc_chunk(
1111-
get_backend_local_state(), *meta, p, size, entry.get_sizeclass());
1182+
// Cache in the frontend large object cache.
1183+
// The meta's free_queue already holds the chunk address (from
1184+
// initialise_large), and the pagemap entry retains the sizeclass
1185+
// and remote allocator info. No data is stored in the freed object.
1186+
// Epoch sync happens internally; stale entries are flushed via the
1187+
// callback.
1188+
large_object_cache.cache(
1189+
meta, size, [this](BackendSlabMetadata* fmeta) {
1190+
flush_large_cache_entry(fmeta);
1191+
});
11121192

11131193
return;
11141194
}
@@ -1117,6 +1197,24 @@ namespace snmalloc
11171197
dealloc_local_object_meta(entry, meta);
11181198
}
11191199

1200+
/**
1201+
* Flush a single cached large object back to the backend.
1202+
* Recovers the chunk address from the metadata and size from the pagemap.
1203+
*/
1204+
void flush_large_cache_entry(BackendSlabMetadata* meta)
1205+
{
1206+
auto slab_addr = meta->get_slab_interior(freelist::Object::key_root);
1207+
const PagemapEntry& entry = Config::Backend::get_metaentry(slab_addr);
1208+
size_t entry_sizeclass = entry.get_sizeclass().as_large();
1209+
size_t size = bits::one_at_bit(entry_sizeclass);
1210+
1211+
auto p =
1212+
capptr::Alloc<void>::unsafe_from(reinterpret_cast<void*>(slab_addr));
1213+
1214+
Config::Backend::dealloc_chunk(
1215+
get_backend_local_state(), *meta, p, size, entry.get_sizeclass());
1216+
}
1217+
11201218
/**
11211219
* Very slow path for object deallocation.
11221220
*
@@ -1427,6 +1525,10 @@ namespace snmalloc
14271525
dealloc_local_slabs<mitigations(freelist_teardown_validate)>(sizeclass);
14281526
}
14291527

1528+
// Flush the large object cache back to the backend.
1529+
large_object_cache.flush_all(
1530+
[this](BackendSlabMetadata* fmeta) { flush_large_cache_entry(fmeta); });
1531+
14301532
if constexpr (mitigations(freelist_teardown_validate))
14311533
{
14321534
laden.iterate(

0 commit comments

Comments (0)