aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/CAS/MappedFileRegionArena.cpp
blob: 472843d78af6e402a6c08bbdef917a493ef0c0db (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file Implements MappedFileRegionArena.
///
/// A bump pointer allocator, backed by a memory-mapped file.
///
/// The effect we want is:
///
/// Step 1. If it doesn't exist, create the file with an initial size.
/// Step 2. Reserve virtual memory large enough for the max file size.
/// Step 3. Map the file into memory in the reserved region.
/// Step 4. Increase the file size and update the mapping when necessary.
///
/// However, updating the mapping is challenging when it needs to work portably,
/// and across multiple processes without locking for every read. Our current
/// implementation handles the steps above in following ways:
///
/// Step 1. Use \ref sys::fs::resize_file_sparse to grow the file to its max
///         size (typically several GB). If the file system doesn't support
///         sparse file, this may return a fully allocated file.
/// Step 2. Call \ref sys::fs::mapped_file_region to map the entire file.
/// Step 3. [Automatic as part of step 2.]
/// Step 4. If supported, use \c fallocate or similar APIs to ensure the file
///         system storage for the sparse file so we won't end up with a
///         partial file if the disk is out of space.
///
/// Additionally, we attempt to resize the file to its actual data size when
/// closing the mapping, if this is the only concurrent instance. This is done
/// using file locks. Shrinking the file mitigates problems with having large
/// files: on filesystems without sparse files it avoids unnecessary space use;
/// it also avoids allocating the full size if another process copies the file,
/// which typically loses sparseness. These mitigations only work while the file
/// is not in use.
///
/// The capacity and the header offset are determined by the first user of the
/// MappedFileRegionArena instance, and any future mismatched value from the
/// original will result in an error on creation.
///
/// To support resizing, we use two separate file locks:
/// 1. We use a shared reader lock on a ".shared" file until destruction.
/// 2. We use a lock on the main file during initialization - shared to check
///    the status, upgraded to exclusive to resize/initialize the file.
///
/// Then during destruction we attempt to get exclusive access on (1), which
/// requires no concurrent readers. If so, we shrink the file. Using two
/// separate locks simplifies the implementation and enables it to work on
/// platforms (e.g. Windows) where a shared/reader lock prevents writing.
//===----------------------------------------------------------------------===//

#include "llvm/CAS/MappedFileRegionArena.h"
#include "OnDiskCommon.h"
#include "llvm/ADT/StringExtras.h"

#if LLVM_ON_UNIX
#include <sys/stat.h>
#if __has_include(<sys/param.h>)
#include <sys/param.h>
#endif
#ifdef DEV_BSIZE
#define MAPPED_FILE_BSIZE DEV_BSIZE
#elif __linux__
#define MAPPED_FILE_BSIZE 512
#endif
#endif

using namespace llvm;
using namespace llvm::cas;
using namespace llvm::cas::ondisk;

namespace {
/// RAII helper pairing a file descriptor with an optional advisory file lock.
///
/// The destructor drops the lock (if still owned) but deliberately does NOT
/// close the descriptor: FD ownership is handed off to the caller (see
/// \c release()), which is responsible for closing it later.
struct FileWithLock {
  std::string Path;
  int FD = -1;
  /// Kind of lock currently held on \c FD, if any.
  std::optional<sys::fs::LockKind> Locked;

private:
  /// Open (creating if necessary) \p PathStr for read/write. Failure is
  /// reported through \p E; on failure \c FD stays -1.
  FileWithLock(std::string PathStr, Error &E) : Path(std::move(PathStr)) {
    ErrorAsOutParameter EOP(&E);
    if (std::error_code EC = sys::fs::openFileForReadWrite(
            Path, FD, sys::fs::CD_OpenAlways, sys::fs::OF_None))
      E = createFileError(Path, EC);
  }

public:
  // Non-copyable: the lock and FD have unique ownership. Move-only.
  FileWithLock(const FileWithLock &) = delete;
  FileWithLock &operator=(const FileWithLock &) = delete;
  FileWithLock(FileWithLock &&Other) noexcept
      : Path(std::move(Other.Path)), FD(Other.FD), Locked(Other.Locked) {
    // Leave the source disengaged so its destructor is a no-op.
    Other.FD = -1;
    Other.Locked = std::nullopt;
  }

  ~FileWithLock() { consumeError(unlock()); }

  /// Open the file at \p Path, creating it if it does not exist.
  static Expected<FileWithLock> open(StringRef Path) {
    Error E = Error::success();
    FileWithLock Result(Path.str(), E);
    if (E)
      return std::move(E);
    return std::move(Result);
  }

  /// Block until a lock of kind \p LK is acquired.
  Error lock(sys::fs::LockKind LK) {
    assert(!Locked && "already locked");
    if (std::error_code EC = lockFileThreadSafe(FD, LK))
      return createFileError(Path, EC);
    Locked = LK;
    return Error::success();
  }

  /// Drop the current lock and acquire one of kind \p LK. Not atomic: another
  /// process may grab the lock in between, so callers must re-validate state
  /// after upgrading.
  Error switchLock(sys::fs::LockKind LK) {
    assert(Locked && "not locked");
    if (auto E = unlock())
      return E;

    return lock(LK);
  }

  /// Drop the lock if one is held. Safe to call when unlocked.
  Error unlock() {
    if (Locked) {
      Locked = std::nullopt;
      if (std::error_code EC = unlockFileThreadSafe(FD))
        return createFileError(Path, EC);
    }
    return Error::success();
  }

  /// Return true if the file was successfully locked exclusively
  /// (non-blocking).
  bool tryLockExclusive() {
    assert(!Locked && "can only try to lock if not locked");
    if (tryLockFileThreadSafe(FD) == std::error_code()) {
      Locked = sys::fs::LockKind::Exclusive;
      return true;
    }

    return false;
  }

  /// Disown the lock and FD so neither is released on destruction; the caller
  /// becomes responsible for unlocking and closing.
  void release() {
    Locked = std::nullopt;
    FD = -1;
  }
};

/// Logical size and allocated-on-disk size of a file; the two differ for
/// sparse files.
struct FileSizeInfo {
  uint64_t Size;
  uint64_t AllocatedSize;

  static ErrorOr<FileSizeInfo> get(sys::fs::file_t File);
};
} // end anonymous namespace

/// Open or create the arena file at \p Path and map it into memory.
///
/// Locking (see file comment): a shared lock on "<Path>.shared" is taken and
/// held for the arena's lifetime; the main file is locked shared to inspect
/// its state and upgraded to exclusive only when it must be resized or
/// initialized. \p NewFileConstructor runs under the exclusive lock, once per
/// on-disk file.
Expected<MappedFileRegionArena> MappedFileRegionArena::create(
    const Twine &Path, uint64_t Capacity, uint64_t HeaderOffset,
    function_ref<Error(MappedFileRegionArena &)> NewFileConstructor) {
  // The capacity must at least cover the arena Header at its offset.
  uint64_t MinCapacity = HeaderOffset + sizeof(Header);
  if (Capacity < MinCapacity)
    return createStringError(
        std::make_error_code(std::errc::invalid_argument),
        "capacity is too small to hold MappedFileRegionArena");

  MappedFileRegionArena Result;
  Result.Path = Path.str();

  // Open the shared lock file. See file comment for details of locking scheme.
  SmallString<128> SharedFilePath(Result.Path);
  SharedFilePath.append(".shared");

  auto SharedFileLock = FileWithLock::open(SharedFilePath);
  if (!SharedFileLock)
    return SharedFileLock.takeError();
  Result.SharedLockFD = SharedFileLock->FD;

  // Take shared/reader lock that will be held until destroyImpl if
  // construction is successful.
  if (auto E = SharedFileLock->lock(sys::fs::LockKind::Shared))
    return std::move(E);

  // Take shared/reader lock on the main file for initialization.
  auto MainFile = FileWithLock::open(Result.Path);
  if (!MainFile)
    return MainFile.takeError();
  if (Error E = MainFile->lock(sys::fs::LockKind::Shared))
    return std::move(E);
  Result.FD = MainFile->FD;

  sys::fs::file_t File = sys::fs::convertFDToNativeFile(MainFile->FD);
  auto FileSize = FileSizeInfo::get(File);
  if (!FileSize)
    return createFileError(Result.Path, FileSize.getError());

  // If the size is smaller than the capacity, we need to initialize the file.
  // It may be empty, or may have been shrunk during a previous close.
  if (FileSize->Size < Capacity) {
    // Lock the file exclusively so only one process will do the initialization.
    if (Error E = MainFile->switchLock(sys::fs::LockKind::Exclusive))
      return std::move(E);
    // Retrieve the current size now that we have exclusive access; another
    // process may have initialized the file while we waited for the lock.
    FileSize = FileSizeInfo::get(File);
    if (!FileSize)
      return createFileError(Result.Path, FileSize.getError());
  }

  if (FileSize->Size >= MinCapacity) {
    // File is initialized. Read out the header to check for capacity and
    // offset.
    SmallVector<char, sizeof(Header)> HeaderContent(sizeof(Header));
    auto Size = sys::fs::readNativeFileSlice(File, HeaderContent, HeaderOffset);
    if (!Size)
      return Size.takeError();

    Header H;
    memcpy(&H, HeaderContent.data(), sizeof(H));
    // A mismatched header offset means the caller's configuration disagrees
    // with whoever created the file; refuse rather than corrupt.
    if (H.HeaderOffset != HeaderOffset)
      return createStringError(
          std::make_error_code(std::errc::invalid_argument),
          "specified header offset (" + utostr(HeaderOffset) +
              ") does not match existing config (" + utostr(H.HeaderOffset) +
              ")");

    // If the capacity doesn't match, use the existing capacity instead.
    if (H.Capacity != Capacity)
      Capacity = H.Capacity;
  }

  // If the size is smaller than capacity, we need to resize the file.
  if (FileSize->Size < Capacity) {
    assert(MainFile->Locked == sys::fs::LockKind::Exclusive);
    if (std::error_code EC =
            sys::fs::resize_file_sparse(MainFile->FD, Capacity))
      return createFileError(Result.Path, EC);
  }

  // Create the mapped region.
  {
    std::error_code EC;
    sys::fs::mapped_file_region Map(
        File, sys::fs::mapped_file_region::readwrite, Capacity, 0, EC);
    if (EC)
      return createFileError(Result.Path, EC);
    Result.Region = std::move(Map);
  }

  // Initialize the header.
  Result.initializeHeader(HeaderOffset);
  if (FileSize->Size < MinCapacity) {
    assert(MainFile->Locked == sys::fs::LockKind::Exclusive);
    // If we need to fully initialize the file, call NewFileConstructor.
    if (Error E = NewFileConstructor(Result))
      return std::move(E);

    // Publish the configuration after construction succeeds, so readers that
    // see a non-zero HeaderOffset/Capacity see a fully constructed file.
    Result.H->HeaderOffset.exchange(HeaderOffset);
    Result.H->Capacity.exchange(Capacity);
  }

  if (MainFile->Locked == sys::fs::LockKind::Exclusive) {
    // If holding an exclusive lock, we might have resized the file and
    // performed some read/write to the file. Query the file size again to make
    // sure everything is up-to-date. Otherwise, FileSize info is already
    // up-to-date.
    FileSize = FileSizeInfo::get(File);
    if (!FileSize)
      return createFileError(Result.Path, FileSize.getError());
    Result.H->AllocatedSize.exchange(FileSize->AllocatedSize);
  }

  // Disown the shared lock (and its FD) so the FileWithLock destructor does
  // not drop it: it stays held, and is released and closed in destroyImpl().
  // MainFile's initialization lock is dropped when it goes out of scope here.
  SharedFileLock->release();
  return std::move(Result);
}

/// Tear down the arena: drop the shared reader lock and, if this was the last
/// concurrent user, shrink the file back to its used size before closing.
void MappedFileRegionArena::destroyImpl() {
  if (!FD)
    return;

  // Drop the shared lock indicating we are no longer accessing the file.
  if (SharedLockFD)
    (void)unlockFileThreadSafe(*SharedLockFD);

  // Attempt to truncate the file if we can get exclusive access — i.e. no
  // other process still holds the shared reader lock. Ignore any errors.
  if (H) {
    assert(SharedLockFD && "Must have shared lock file open");
    if (tryLockFileThreadSafe(*SharedLockFD) == std::error_code()) {
      // Capture the used size before unmapping, since size() reads the header
      // inside the mapping.
      size_t Size = size();
      // sync to file system to make sure all contents are up-to-date.
      (void)Region.sync();
      // unmap the file before resizing since that is the requirement for
      // some platforms.
      Region.unmap();
      (void)sys::fs::resize_file(*FD, Size);
      (void)unlockFileThreadSafe(*SharedLockFD);
    }
  }

  // Close an FD and mark it closed; no-op when already disengaged.
  auto Close = [](std::optional<int> &FD) {
    if (FD) {
      sys::fs::file_t File = sys::fs::convertFDToNativeFile(*FD);
      sys::fs::closeFile(File);
      FD = std::nullopt;
    }
  };

  // Close the file and shared lock.
  Close(FD);
  Close(SharedLockFD);
}

/// Point \c H at the arena Header located at \p HeaderOffset inside the
/// mapped region, and ensure the bump pointer starts past the header.
void MappedFileRegionArena::initializeHeader(uint64_t HeaderOffset) {
  assert(capacity() < (uint64_t)INT64_MAX && "capacity must fit in int64_t");
  uint64_t HeaderEndOffset = HeaderOffset + sizeof(decltype(*H));
  assert(HeaderEndOffset <= capacity() &&
         "Expected end offset to be pre-allocated");
  // Note: the condition checks the header's start offset, so the message must
  // say so (previously it misleadingly referred to the end offset).
  assert(isAligned(Align::Of<decltype(*H)>(), HeaderOffset) &&
         "Expected header offset to be aligned");
  H = reinterpret_cast<decltype(H)>(data() + HeaderOffset);

  // Claim the initial bump pointer if the file is freshly created (BumpPtr is
  // still zero). If another process already initialized it, the existing
  // value must lie at or past the end of the header.
  uint64_t ExistingValue = 0;
  if (!H->BumpPtr.compare_exchange_strong(ExistingValue, HeaderEndOffset))
    assert(ExistingValue >= HeaderEndOffset &&
           "Expected 0, or past the end of the header itself");
}

/// Build the error returned when the arena's fixed capacity is exhausted.
static Error createAllocatorOutOfSpaceError() {
  auto EC = std::make_error_code(std::errc::not_enough_memory);
  return createStringError(EC, "memory mapped file allocator is out of space");
}

/// Bump-allocate \p AllocSize bytes (rounded up to the arena alignment) and
/// return the allocation's offset, extending the on-disk allocation when the
/// bump pointer passes the already-allocated size. Lock-free with respect to
/// concurrent allocators in this and other processes.
Expected<int64_t> MappedFileRegionArena::allocateOffset(uint64_t AllocSize) {
  AllocSize = alignTo(AllocSize, getAlign());
  uint64_t OldEnd = H->BumpPtr.fetch_add(AllocSize);
  uint64_t NewEnd = OldEnd + AllocSize;
  if (LLVM_UNLIKELY(NewEnd > capacity())) {
    // Return the allocation. If the start was already past the end, that means
    // some other concurrent allocations already consumed all the capacity.
    // There is no need to restore the original value. If the start was not
    // past the end, the current allocation certainly bumped it past the end.
    // All other allocations afterwards must have failed and the current
    // allocation is in charge of returning the bump pointer to a valid value.
    if (OldEnd <= capacity())
      (void)H->BumpPtr.exchange(OldEnd);

    return createAllocatorOutOfSpaceError();
  }

  uint64_t DiskSize = H->AllocatedSize;
  if (LLVM_UNLIKELY(NewEnd > DiskSize)) {
    uint64_t NewSize;
    // The minimum increment is a page, but allocate more to amortize the cost.
    constexpr uint64_t Increment = 1 * 1024 * 1024; // 1 MB
    if (Error E = preallocateFileTail(*FD, DiskSize, DiskSize + Increment)
                      .moveInto(NewSize))
      return std::move(E);
    assert(NewSize >= DiskSize + Increment);
    // FIXME: on Darwin this can under-count the size if there is a race to
    // preallocate disk, because the semantics of F_PREALLOCATE are to add bytes
    // to the end of the file, not to allocate up to a fixed size.
    // Any discrepancy will be resolved the next time the file is truncated and
    // then reopened.
    // Publish the new allocated size. On CAS failure, DiskSize is reloaded
    // with the current value, so this loop only ever replaces smaller (stale)
    // values and terminates once AllocatedSize >= NewSize.
    while (DiskSize < NewSize)
      H->AllocatedSize.compare_exchange_strong(DiskSize, NewSize);
  }
  return OldEnd;
}

/// Query the logical size and the allocated-on-disk size of \p File. The two
/// differ for sparse files, where AllocatedSize can be far smaller than Size.
ErrorOr<FileSizeInfo> FileSizeInfo::get(sys::fs::file_t File) {
#if LLVM_ON_UNIX && defined(MAPPED_FILE_BSIZE)
  struct stat Status;
  int StatRet = ::fstat(File, &Status);
  if (StatRet)
    return errnoAsErrorCode();
  // st_blocks counts blocks of DEV_BSIZE (512) bytes actually allocated to
  // the file, which is what MAPPED_FILE_BSIZE is defined as above. (This must
  // NOT use st_blksize, which is the preferred I/O block size and says
  // nothing about how much storage the file occupies.)
  uint64_t AllocatedSize = uint64_t(Status.st_blocks) * MAPPED_FILE_BSIZE;
  return FileSizeInfo{uint64_t(Status.st_size), AllocatedSize};
#else
  // Fallback: assume the file is fully allocated. Note: this may result in
  // data loss on out-of-space.
  sys::fs::file_status Status;
  if (std::error_code EC = sys::fs::status(File, Status))
    return EC;
  return FileSizeInfo{Status.getSize(), Status.getSize()};
#endif
}