author     Jordan Niethe <jniethe5@gmail.com>      2019-04-02 10:43:17 +1100
committer  Stewart Smith <stewart@linux.ibm.com>   2019-05-20 14:20:29 +1000
commit     bc04bf1eef38a11b9d90f379d7be180f98e2b58d (patch)
tree       2dba2b79d778a9e79746a07ef1e1000c06953aa3 /core
parent     d56b151d7f87d18f7b6333d8ec4d5c5cf04b538c (diff)
core/trace: Change mask/and to modulo for buffer offset
We would like to be able to mmap the trace buffers so that the dump_trace tool can make use of the existing functions for reading traces in external/trace. Mmapping is done in pages, which means the buffers should be aligned to the page size. This is not as simple as setting the buffer length to a page-aligned value, as each buffer has a header and leaves space for an extra entry at the end. These must be taken into account so that the entire buffer is page aligned.

The current method of calculating buffer offsets is to use a mask and a bitwise 'and'. This limits the potential sizes of the buffer to powers of two. The initial justification for using the mask was that the buffers had different sizes, so the offset needed to be based on information the buffers carried with them, otherwise they could overflow. Being limited to powers of two makes it impossible to page align the entire buffer.

Change to using modulo for calculating the buffer offset, making a much larger range of buffer sizes possible. Instead of the mask, make each buffer carry around its length, which is used for calculating the offset, to avoid overflows.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
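As a quick, standalone illustration of why the change matters (a minimal sketch, not skiboot code; the helper names and the 48 KiB size are made up for the example), the old mask/and scheme only wraps a position back into the buffer correctly when the buffer size is a power of two, whereas modulo wraps correctly for any size:

#include <assert.h>
#include <stdint.h>

/* Old scheme: offset = pos & (buf_size - 1).  Only correct when buf_size is
 * a power of two, because only then is (buf_size - 1) an all-ones mask. */
static uint64_t offset_by_mask(uint64_t pos, uint64_t buf_size)
{
	return pos & (buf_size - 1);
}

/* New scheme: offset = pos % buf_size.  Correct for any buf_size, so the
 * whole allocation (header + entries + spare entry) can be padded out to a
 * page boundary. */
static uint64_t offset_by_modulo(uint64_t pos, uint64_t buf_size)
{
	return pos % buf_size;
}

int main(void)
{
	uint64_t buf_size = 48 * 1024;	/* 48 KiB: not a power of two */
	uint64_t pos = buf_size + 16;	/* just past the first wrap */

	assert(offset_by_modulo(pos, buf_size) == 16);	/* wraps correctly */
	assert(offset_by_mask(pos, buf_size) != 16);	/* lands in the wrong place */
	return 0;
}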
Diffstat (limited to 'core')
-rw-r--r--  core/test/run-trace.c |  2
-rw-r--r--  core/trace.c          | 18
2 files changed, 10 insertions, 10 deletions
diff --git a/core/test/run-trace.c b/core/test/run-trace.c
index ec907c8..8eb4258 100644
--- a/core/test/run-trace.c
+++ b/core/test/run-trace.c
@@ -182,7 +182,7 @@ static void test_parallel(void)
 	for (i = 0; i < CPUS; i++) {
 		fake_cpus[i].trace = p + i * len;
-		fake_cpus[i].trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
+		fake_cpus[i].trace->tb.buf_size = cpu_to_be64(TBUF_SZ);
 		fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
 		fake_cpus[i].is_secondary = false;
 	}
diff --git a/core/trace.c b/core/trace.c
index 4eb9e36..a7b3cd1 100644
--- a/core/trace.c
+++ b/core/trace.c
@@ -29,7 +29,7 @@
 #define DEBUG_TRACES
-#define MAX_SIZE (sizeof(union trace) + 7)
+#define MAX_SIZE sizeof(union trace)
 /* Smaller trace buffer for early booting */
 #define BOOT_TBUF_SZ 65536
@@ -41,7 +41,7 @@ static struct {
 void init_boot_tracebuf(struct cpu_thread *boot_cpu)
 {
 	init_lock(&boot_tracebuf.trace_info.lock);
-	boot_tracebuf.trace_info.tb.mask = cpu_to_be64(BOOT_TBUF_SZ - 1);
+	boot_tracebuf.trace_info.tb.buf_size = cpu_to_be64(BOOT_TBUF_SZ);
 	boot_tracebuf.trace_info.tb.max_size = cpu_to_be32(MAX_SIZE);
 	boot_cpu->trace = &boot_tracebuf.trace_info;
@@ -61,7 +61,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	struct trace_repeat *rpt;
 	u32 len;
 
-	prev = (void *)tb->buf + be64_to_cpu(tb->last & tb->mask);
+	prev = (void *)tb->buf + be64_to_cpu(tb->last) % be64_to_cpu(tb->buf_size);
 
 	if (prev->type != trace->hdr.type
 	    || prev->len_div_8 != trace->hdr.len_div_8
@@ -80,7 +80,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	if (be64_to_cpu(tb->last) + len != be64_to_cpu(tb->end)) {
 		u64 pos = be64_to_cpu(tb->last) + len;
 		/* FIXME: Reader is not protected from seeing this! */
-		rpt = (void *)tb->buf + (pos & be64_to_cpu(tb->mask));
+		rpt = (void *)tb->buf + pos % be64_to_cpu(tb->buf_size);
 		assert(pos + rpt->len_div_8*8 == be64_to_cpu(tb->end));
 		assert(rpt->type == TRACE_REPEAT);
@@ -99,7 +99,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
 	 */
 	assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));
-	rpt = (void *)tb->buf + be64_to_cpu(tb->end & tb->mask);
+	rpt = (void *)tb->buf + be64_to_cpu(tb->end) % be64_to_cpu(tb->buf_size);
 	rpt->timestamp = trace->hdr.timestamp;
 	rpt->type = TRACE_REPEAT;
 	rpt->len_div_8 = sizeof(*rpt) >> 3;
@@ -138,12 +138,12 @@ void trace_add(union trace *trace, u8 type, u16 len)
 	lock(&ti->lock);
 
 	/* Throw away old entries before we overwrite them. */
-	while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
+	while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.buf_size))
 	       < (be64_to_cpu(ti->tb.end) + tsz)) {
 		struct trace_hdr *hdr;
 
 		hdr = (void *)ti->tb.buf +
-			be64_to_cpu(ti->tb.start & ti->tb.mask);
+			be64_to_cpu(ti->tb.start) % be64_to_cpu(ti->tb.buf_size);
 		ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
 					   (hdr->len_div_8 << 3));
 	}
@@ -154,7 +154,7 @@ void trace_add(union trace *trace, u8 type, u16 len)
 	/* Check for duplicates... */
 	if (!handle_repeat(&ti->tb, trace)) {
 		/* This may go off end, and that's why ti->tb.buf is oversize */
-		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
+		memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end) % be64_to_cpu(ti->tb.buf_size),
 		       trace, tsz);
 		ti->tb.last = ti->tb.end;
 		lwsync(); /* write barrier: write entry before exposing */
@@ -220,8 +220,8 @@ void init_trace_buffers(void)
 			any = t->trace;
 			memset(t->trace, 0, size);
 			init_lock(&t->trace->lock);
-			t->trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
 			t->trace->tb.max_size = cpu_to_be32(MAX_SIZE);
+			t->trace->tb.buf_size = cpu_to_be64(TBUF_SZ);
 			trace_add_desc(any, sizeof(t->trace->tb) +
 				       tracebuf_extra());
 		} else
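To tie this back to the motivation in the commit message, here is a minimal sketch (illustrative constants only; PAGE_SIZE, HDR_SIZE, MAX_ENTRY and the page count are stand-ins, not skiboot's real values) of how a non-power-of-two buf_size lets the header, the entries and the spare entry at the end add up to a whole number of pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed page size */
#define HDR_SIZE	64UL	/* stand-in for the tracebuf header size */
#define MAX_ENTRY	128UL	/* stand-in for the spare entry (MAX_SIZE) */

int main(void)
{
	/* Choose buf_size so that header + buffer + spare entry fills exactly
	 * N pages; buf_size need not be a power of two, which is what the
	 * switch from mask/and to modulo makes possible. */
	uint64_t pages = 16;
	uint64_t buf_size = pages * PAGE_SIZE - HDR_SIZE - MAX_ENTRY;

	printf("buf_size=%llu total=%llu bytes (%llu pages)\n",
	       (unsigned long long)buf_size,
	       (unsigned long long)(HDR_SIZE + buf_size + MAX_ENTRY),
	       (unsigned long long)pages);
	return 0;
}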