aboutsummaryrefslogtreecommitdiff
path: root/compiler-rt/lib/memprof/memprof_mapping.h
blob: 6da385ab3d6e2df5049a243e944535236e0c0ed2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
//===-- memprof_mapping.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Defines MemProf memory mapping.
//===----------------------------------------------------------------------===//
#ifndef MEMPROF_MAPPING_H
#define MEMPROF_MAPPING_H

#include "memprof_internal.h"

// Shadow scale: memory addresses are shifted right by this amount when
// translated to shadow addresses, so 2^3 == 8 bytes of application memory
// correspond to one byte of shadow address space.
static const u64 kDefaultShadowScale = 3;
#define SHADOW_SCALE kDefaultShadowScale

// Base address of the shadow region. NOTE(review): the name suggests this is
// chosen dynamically at startup — confirm against __memprof_init.
#define SHADOW_OFFSET __memprof_shadow_memory_dynamic_address

// Granularity implied by the scale: 1 << 3 == 8 bytes.
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEMPROF_ALIGNMENT 32
namespace __memprof {

extern uptr kHighMemEnd; // Initialized in __memprof_init.

} // namespace __memprof

// Size of memory block mapped to a single shadow location
#define MEM_GRANULARITY 64ULL

// Clears the low bits of an address so that every address inside the same
// 64-byte granule maps to the same shadow location.
#define SHADOW_MASK ~(MEM_GRANULARITY - 1)

// Translates a memory address to the address of its shadow counter:
// round down to the 64-byte granule, scale down by SHADOW_SCALE, and
// rebase onto the shadow region.
#define MEM_TO_SHADOW(mem)                                                     \
  ((((mem) & SHADOW_MASK) >> SHADOW_SCALE) + (SHADOW_OFFSET))

// Histogram shadow memory is laid out differently from the standard
// configuration:
//
//             8 bytes
//         +---+---+---+  +---+---+---+  +---+---+---+
//  Memory |     a     |  |     b     |  |     c     |
//         +---+---+---+  +---+---+---+  +---+---+---+
//
//             +---+          +---+          +---+
//  Shadow     | a |          | b |          | c |
//             +---+          +---+          +---+
//            1 byte
//
// That is, there is a 1-byte counter for each 8 bytes of memory.
// HISTOGRAM_MEM_TO_SHADOW translates a memory address to the address of its
// corresponding shadow counter. The same data is still provided in the MIB
// whether histograms are used or not. Total access counts per allocation are
// computed by summing up all individual 1-byte counters; this can incur an
// accuracy penalty.

// Bytes of application memory covered by one 1-byte histogram counter.
#define HISTOGRAM_GRANULARITY 8ULL

// Histogram counters are a single byte and saturate at this value.
#define HISTOGRAM_MAX_COUNTER 255U

// Clears the low bits so all addresses within an 8-byte granule map to the
// same histogram counter.
#define HISTOGRAM_SHADOW_MASK ~(HISTOGRAM_GRANULARITY - 1)

#define HISTOGRAM_MEM_TO_SHADOW(mem)                                           \
  ((((mem) & HISTOGRAM_SHADOW_MASK) >> SHADOW_SCALE) + (SHADOW_OFFSET))

// Shadow bytes backing one MEM_GRANULARITY-sized block: 64 >> 3 == 8.
#define SHADOW_ENTRY_SIZE (MEM_GRANULARITY >> SHADOW_SCALE)

// Low application memory: everything below the shadow base.
#define kLowMemBeg 0
#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)

// Shadow for the low memory region starts at the shadow base itself.
#define kLowShadowBeg SHADOW_OFFSET
#define kLowShadowEnd (MEM_TO_SHADOW(kLowMemEnd) + SHADOW_ENTRY_SIZE - 1)

// High application memory begins one byte past the end of the high shadow:
// this expression is algebraically equal to kHighShadowEnd + 1.
#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1 + SHADOW_ENTRY_SIZE - 1)

#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
#define kHighShadowEnd (MEM_TO_SHADOW(kHighMemEnd) + SHADOW_ENTRY_SIZE - 1)

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
#define kZeroBaseShadowStart 0
#define kZeroBaseMaxShadowStart (1 << 18)

// Unmapped gap between the end of the low shadow and the start of the high
// shadow.
#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
#define kShadowGapEnd (kHighShadowBeg - 1)

namespace __memprof {

// Number of shadow bytes needed to cover `size` bytes of application memory.
inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }

// True if `a` falls in the low application-memory region [0, kLowMemEnd].
inline bool AddrIsInLowMem(uptr a) { return a <= kLowMemEnd; }

// True if `a` falls inside the shadow of the low memory region.
inline bool AddrIsInLowShadow(uptr a) {
  return kLowShadowBeg <= a && a <= kLowShadowEnd;
}

// True if `a` falls in the high application-memory region. Always false
// before kHighMemEnd has been initialized (kHighMemBeg derives from it).
inline bool AddrIsInHighMem(uptr a) {
  if (!kHighMemBeg)
    return false;
  return kHighMemBeg <= a && a <= kHighMemEnd;
}

// True if `a` falls inside the shadow of the high memory region.
inline bool AddrIsInHighShadow(uptr a) {
  if (!kHighMemBeg)
    return false;
  return kHighShadowBeg <= a && a <= kHighShadowEnd;
}

// True if `a` falls in the unmapped gap between the shadow regions.
inline bool AddrIsInShadowGap(uptr a) {
  // In zero-based shadow mode we treat addresses near zero as addresses
  // in shadow gap as well.
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return kShadowGapBeg <= a && a <= kShadowGapEnd;
}

// True if `a` is a valid application-memory address. When the shadow gap is
// not protected, gap addresses are treated as application memory too.
inline bool AddrIsInMem(uptr a) {
  if (AddrIsInLowMem(a) || AddrIsInHighMem(a))
    return true;
  return flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a);
}

// Maps an application-memory address to its shadow address; `p` must be a
// valid application address.
inline uptr MemToShadow(uptr p) {
  CHECK(AddrIsInMem(p));
  const uptr shadow = MEM_TO_SHADOW(p);
  return shadow;
}

// True if `a` falls in either shadow region.
inline bool AddrIsInShadow(uptr a) {
  if (AddrIsInLowShadow(a))
    return true;
  return AddrIsInHighShadow(a);
}

// True if `a` is aligned to the 8-byte shadow granularity.
inline bool AddrIsAlignedByGranularity(uptr a) {
  return (a % SHADOW_GRANULARITY) == 0;
}

// Bumps the 8-byte access counter covering `a`.
inline void RecordAccess(uptr a) {
  // If we use a different shadow size then the type below needs adjustment.
  CHECK_EQ(SHADOW_ENTRY_SIZE, 8);
  u64 *counter = (u64 *)MEM_TO_SHADOW(a);
  ++(*counter);
}

// Bumps the 1-byte histogram counter covering `a`, saturating at
// HISTOGRAM_MAX_COUNTER instead of wrapping.
inline void RecordAccessHistogram(uptr a) {
  CHECK_EQ(SHADOW_ENTRY_SIZE, 8);
  u8 *counter = (u8 *)HISTOGRAM_MEM_TO_SHADOW(a);
  if (*counter < HISTOGRAM_MAX_COUNTER) {
    ++(*counter);
  }
}

} // namespace __memprof

#endif // MEMPROF_MAPPING_H