#ifndef CHUNKED_MEMORY_POOL_INL_H_
#error "Direct inclusion of this file is not allowed, include chunked_memory_pool.h"
// For the sake of sane code completion.
#include "chunked_memory_pool.h"
#endif

#include "serialize.h"

#include <library/cpp/yt/malloc/malloc.h>

#include <util/system/align.h>

namespace NYT {

////////////////////////////////////////////////////////////////////////////////
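
// TAllocationHolder carves its payload out of the same malloc block as the
// holder object itself: |Ref_| points just past |this| (see Allocate() below).
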
inline void TAllocationHolder::operator delete(void* ptr) noexcept
{
    ::free(ptr);
}

inline TMutableRef TAllocationHolder::GetRef() const
{
    return Ref_;
}

template <class TDerived>
TDerived* TAllocationHolder::Allocate(size_t size, TRefCountedTypeCookie cookie)
{
    auto requestedSize = sizeof(TDerived) + size;
    auto* ptr = ::malloc(requestedSize);

#ifndef _win_
    // malloc may hand out more than requested; extend the payload to
    // cover the whole usable block.
    auto allocatedSize = ::malloc_usable_size(ptr);
    if (allocatedSize) {
        size += allocatedSize - requestedSize;
    }
#endif

    auto* instance = static_cast<TDerived*>(ptr);

    try {
        // The payload starts right past the object itself.
        new (instance) TDerived(TMutableRef(instance + 1, size), cookie);
    } catch (const std::exception&) {
        // Do not forget to free the memory.
        ::free(ptr);
        throw;
    }

    return instance;
}
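
// A hedged usage sketch: |TMyHolder| stands for a hypothetical subclass whose
// constructor accepts the (TMutableRef, TRefCountedTypeCookie) pair that
// Allocate() passes through:
//
//   auto* holder = TAllocationHolder::Allocate<TMyHolder>(
//       4096, GetRefCountedTypeCookie<TDefaultChunkedMemoryPoolTag>());
//   TMutableRef payload = holder->GetRef(); // at least 4096 writable bytes
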
////////////////////////////////////////////////////////////////////////////////

inline TChunkedMemoryPool::TChunkedMemoryPool()
    : TChunkedMemoryPool(
        GetRefCountedTypeCookie<TDefaultChunkedMemoryPoolTag>())
{ }

template <class TTag>
inline TChunkedMemoryPool::TChunkedMemoryPool(
    TTag,
    size_t startChunkSize)
    : TChunkedMemoryPool(
        GetRefCountedTypeCookie<TTag>(),
        startChunkSize)
{ }
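
// The free zone of the current chunk is consumed from both ends:
// AllocateUnaligned() bumps FreeZoneEnd_ down while AllocateAligned() bumps
// FreeZoneBegin_ up, so alignment padding never fragments the unaligned side.
//
// A minimal usage sketch (assuming the interface declared in
// chunked_memory_pool.h):
//
//   TChunkedMemoryPool pool;
//   char* raw = pool.AllocateUnaligned(128);
//   auto* words = pool.AllocateUninitialized<ui64>(16);
//   pool.Clear(); // recycles cached chunks, releases oversized blocks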

inline char* TChunkedMemoryPool::AllocateUnaligned(size_t size)
{
    // Fast path: carve the allocation off the tail of the free zone.
    if (FreeZoneEnd_ >= FreeZoneBegin_ + size) {
        FreeZoneEnd_ -= size;
        Size_ += size;
        return FreeZoneEnd_;
    }

    // Slow path: the current chunk cannot fit the request.
    return AllocateUnalignedSlow(size);
}

inline char* TChunkedMemoryPool::AllocateAligned(size_t size, int align)
{
    // NB: This can lead to FreeZoneBegin_ >= FreeZoneEnd_, in which case the chunk is full.
    FreeZoneBegin_ = AlignUp(FreeZoneBegin_, align);

    // Fast path: carve the allocation off the head of the free zone.
    if (FreeZoneBegin_ + size <= FreeZoneEnd_) {
        char* result = FreeZoneBegin_;
        Size_ += size;
        FreeZoneBegin_ += size;
        return result;
    }

    // Slow path: not enough room left after aligning.
    return AllocateAlignedSlow(size, align);
}

template <class T>
inline T* TChunkedMemoryPool::AllocateUninitialized(int n, int align)
{
    return reinterpret_cast<T*>(AllocateAligned(sizeof(T) * n, align));
}

template <class T>
inline TMutableRange<T> TChunkedMemoryPool::Capture(TRange<T> src, int align)
{
    auto* dst = AllocateUninitialized<T>(src.Size(), align);
    ::memcpy(dst, src.Begin(), sizeof(T) * src.Size());
    return TMutableRange<T>(dst, src.Size());
}
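
// A hedged example of Capture(): copy a caller-owned range into the pool
// (MakeRange is assumed to be available from the range helpers):
//
//   std::vector<int> values{1, 2, 3};
//   TMutableRange<int> copy = pool.Capture(MakeRange(values));
//   // |copy| stays valid until the pool is cleared or destroyed.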

inline void TChunkedMemoryPool::Free(char* from, char* to)
{
    if (FreeZoneBegin_ == to) {
        FreeZoneBegin_ = from;
    }
    if (FreeZoneEnd_ == from) {
        FreeZoneEnd_ = to;
    }
}
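
// NB: Free() is best-effort: the bytes rejoin the free zone only when
// [from, to) directly abuts it (i.e. it was the most recent allocation on
// that side); otherwise they stay occupied until Clear().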

inline void TChunkedMemoryPool::Clear()
{
    Size_ = 0;

    if (Chunks_.empty()) {
        FreeZoneBegin_ = nullptr;
        FreeZoneEnd_ = nullptr;
        NextChunkIndex_ = 0;
    } else {
        // Rewind to the first cached chunk; the remaining ones are reused lazily.
        FreeZoneBegin_ = Chunks_.front()->GetRef().Begin();
        FreeZoneEnd_ = Chunks_.front()->GetRef().End();
        NextChunkIndex_ = 1;
    }

    // Oversized blocks are not cached: release them and shrink the capacity.
    for (const auto& block : OtherBlocks_) {
        Capacity_ -= block->GetRef().Size();
    }
    OtherBlocks_.clear();
}

////////////////////////////////////////////////////////////////////////////////

} // namespace NYT