author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-28 11:49:03 +1100
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-28 11:49:03 +1100
commit    3b9af3dfe4368cad5cb78727b1e1e04b45aace18 (patch)
tree      93156a56badb9b524adf4533d5b9373b30d4d47a /core
parent    89192badc730eea4eede2dbe662a3755443e9141 (diff)
More trace endian fixes so make check works again
We need the core trace code to do proper endian conversion, among other things, since that code is also compiled into the run-trace test.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
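Editorial note: the conversions in this patch all follow one pattern. The tracebuf fields live in the shared buffer in big-endian form, so every load goes through beXX_to_cpu() and every store through cpu_to_beXX(), with native-endian values held only in locals. The standalone sketch below illustrates that pattern; the tracebuf_demo struct and the bswap-based helper definitions are illustrative assumptions (they mimic, but are not, skiboot's own endian helpers), and the sketch assumes a little-endian host with GCC/clang builtins.

/*
 * Illustrative sketch only, not part of the patch: the read/modify/write
 * pattern used above, with hypothetical stand-in types and helpers.
 */
#include <stdint.h>

typedef uint64_t be64;          /* big-endian on-buffer representation */

static inline be64 cpu_to_be64(uint64_t x) { return __builtin_bswap64(x); }
static inline uint64_t be64_to_cpu(be64 x) { return __builtin_bswap64(x); }

struct tracebuf_demo {          /* hypothetical stand-in for struct tracebuf */
	be64 start, end, last, mask;
};

/* Advance ->end by len bytes: convert to CPU order, add, convert back. */
static void advance_end(struct tracebuf_demo *tb, uint64_t len)
{
	tb->end = cpu_to_be64(be64_to_cpu(tb->end) + len);
}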
Diffstat (limited to 'core')
-rw-r--r--  core/test/run-trace.c  58
-rw-r--r--  core/trace.c           43
2 files changed, 53 insertions(+), 48 deletions(-)
diff --git a/core/test/run-trace.c b/core/test/run-trace.c
index cdd84bd..79c5701 100644
--- a/core/test/run-trace.c
+++ b/core/test/run-trace.c
@@ -174,8 +174,8 @@ static void test_parallel(void)
for (i = 0; i < CPUS; i++) {
fake_cpus[i].trace = p + i * len;
- fake_cpus[i].trace->tb.mask = TBUF_SZ - 1;
- fake_cpus[i].trace->tb.max_size = sizeof(union trace);
+ fake_cpus[i].trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
+ fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
fake_cpus[i].is_secondary = false;
}
@@ -202,27 +202,27 @@ static void test_parallel(void)
i = (i + last) % CPUS;
last = i;
- assert(t.hdr.cpu < CPUS);
- assert(!done[t.hdr.cpu]);
+ assert(be16_to_cpu(t.hdr.cpu) < CPUS);
+ assert(!done[be16_to_cpu(t.hdr.cpu)]);
if (t.hdr.type == TRACE_OVERFLOW) {
/* Conveniently, each record is 16 bytes here. */
- assert(t.overflow.bytes_missed % 16 == 0);
- overflows[i] += t.overflow.bytes_missed / 16;
+ assert(be64_to_cpu(t.overflow.bytes_missed) % 16 == 0);
+ overflows[i] += be64_to_cpu(t.overflow.bytes_missed) / 16;
num_overflows[i]++;
continue;
}
- assert(t.hdr.timestamp % CPUS == t.hdr.cpu);
+ assert(be64_to_cpu(t.hdr.timestamp) % CPUS == be16_to_cpu(t.hdr.cpu));
if (t.hdr.type == TRACE_REPEAT) {
assert(t.hdr.len_div_8 * 8 == sizeof(t.repeat));
- assert(t.repeat.num != 0);
- assert(t.repeat.num <= t.hdr.cpu);
- repeats[t.hdr.cpu] += t.repeat.num;
+ assert(be16_to_cpu(t.repeat.num) != 0);
+ assert(be16_to_cpu(t.repeat.num) <= be16_to_cpu(t.hdr.cpu));
+ repeats[be16_to_cpu(t.hdr.cpu)] += be16_to_cpu(t.repeat.num);
} else if (t.hdr.type == 0x70) {
- done[t.hdr.cpu] = true;
+ done[be16_to_cpu(t.hdr.cpu)] = true;
} else {
- counts[t.hdr.cpu]++;
+ counts[be16_to_cpu(t.hdr.cpu)]++;
}
}
@@ -235,7 +235,7 @@ static void test_parallel(void)
for (i = 0; i < CPUS; i++) {
printf("Child %i: %u produced, %u overflows, %llu total\n", i,
counts[i], overflows[i],
- (long long)fake_cpus[i].trace->tb.end);
+ (long long)be64_to_cpu(fake_cpus[i].trace->tb.end));
assert(counts[i] + repeats[i] <= PER_CHILD_TRACES);
}
/* Child 0 never repeats. */
@@ -276,7 +276,7 @@ int main(void)
trace_add(&minimal, 100, sizeof(trace.hdr));
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
- assert(trace.hdr.timestamp == timestamp);
+ assert(be64_to_cpu(trace.hdr.timestamp) == timestamp);
/* Make it wrap once. */
for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8) + 1; i++) {
@@ -288,12 +288,12 @@ int main(void)
/* First one must be overflow marker. */
assert(trace.hdr.type == TRACE_OVERFLOW);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.overflow));
- assert(trace.overflow.bytes_missed == minimal.hdr.len_div_8 * 8);
+ assert(be64_to_cpu(trace.overflow.bytes_missed) == minimal.hdr.len_div_8 * 8);
for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8); i++) {
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
- assert(trace.hdr.timestamp == i+1);
+ assert(be64_to_cpu(trace.hdr.timestamp) == i+1);
assert(trace.hdr.type == 99 + ((i+1)%2));
}
assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
@@ -309,9 +309,9 @@ int main(void)
assert(trace.hdr.type == TRACE_OVERFLOW);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.len_div_8 == large.hdr.len_div_8);
- i = trace.hdr.timestamp;
+ i = be64_to_cpu(trace.hdr.timestamp);
while (trace_get(&trace, &my_fake_cpu->trace->tb))
- assert(trace.hdr.timestamp == ++i);
+ assert(be64_to_cpu(trace.hdr.timestamp) == ++i);
/* Test repeats. */
for (i = 0; i < 65538; i++) {
@@ -330,33 +330,33 @@ int main(void)
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
- assert(trace.repeat.num == 65535);
- assert(trace.repeat.timestamp == 65535);
+ assert(be16_to_cpu(trace.repeat.num) == 65535);
+ assert(be64_to_cpu(trace.repeat.timestamp) == 65535);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
- assert(trace.hdr.timestamp == 65536);
+ assert(be64_to_cpu(trace.hdr.timestamp) == 65536);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 100);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
- assert(trace.repeat.num == 1);
- assert(trace.repeat.timestamp == 65537);
+ assert(be16_to_cpu(trace.repeat.num) == 1);
+ assert(be64_to_cpu(trace.repeat.timestamp) == 65537);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
- assert(trace.hdr.timestamp == 65538);
+ assert(be64_to_cpu(trace.hdr.timestamp) == 65538);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 101);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
- assert(trace.repeat.num == 1);
- assert(trace.repeat.timestamp == 65539);
+ assert(be16_to_cpu(trace.repeat.num) == 1);
+ assert(be64_to_cpu(trace.repeat.timestamp) == 65539);
/* Now, test adding repeat while we're reading... */
timestamp = 0;
trace_add(&minimal, 100, sizeof(trace.hdr));
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
- assert(trace.hdr.timestamp == 0);
+ assert(be64_to_cpu(trace.hdr.timestamp) == 0);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 100);
@@ -370,9 +370,9 @@ int main(void)
} else {
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
- assert(trace.repeat.num == 1);
+ assert(be16_to_cpu(trace.repeat.num) == 1);
}
- assert(trace.repeat.timestamp == i);
+ assert(be64_to_cpu(trace.repeat.timestamp) == i);
assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
}
diff --git a/core/trace.c b/core/trace.c
index 76f3c30..47c10e3 100644
--- a/core/trace.c
+++ b/core/trace.c
@@ -64,7 +64,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
struct trace_repeat *rpt;
u32 len;
- prev = (void *)tb->buf + (tb->last & tb->mask);
+ prev = (void *)tb->buf + be64_to_cpu(tb->last & tb->mask);
if (prev->type != trace->hdr.type
|| prev->len_div_8 != trace->hdr.len_div_8
@@ -76,21 +76,22 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
return false;
/* If they've consumed prev entry, don't repeat. */
- if (tb->last < tb->start)
+ if (be64_to_cpu(tb->last) < be64_to_cpu(tb->start))
return false;
/* OK, it's a duplicate. Do we already have repeat? */
- if (tb->last + len != tb->end) {
+ if (be64_to_cpu(tb->last) + len != be64_to_cpu(tb->end)) {
+ u64 pos = be64_to_cpu(tb->last) + len;
/* FIXME: Reader is not protected from seeing this! */
- rpt = (void *)tb->buf + ((tb->last + len) & tb->mask);
- assert(tb->last + len + rpt->len_div_8*8 == tb->end);
+ rpt = (void *)tb->buf + (pos & be64_to_cpu(tb->mask));
+ assert(pos + rpt->len_div_8*8 == be64_to_cpu(tb->end));
assert(rpt->type == TRACE_REPEAT);
/* If this repeat entry is full, don't repeat. */
- if (rpt->num == 0xFFFF)
+ if (be16_to_cpu(rpt->num) == 0xFFFF)
return false;
- rpt->num++;
+ rpt->num = cpu_to_be16(be16_to_cpu(rpt->num) + 1);
rpt->timestamp = trace->hdr.timestamp;
return true;
}
@@ -101,15 +102,15 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
*/
assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));
- rpt = (void *)tb->buf + (tb->end & tb->mask);
+ rpt = (void *)tb->buf + be64_to_cpu(tb->end & tb->mask);
rpt->timestamp = trace->hdr.timestamp;
rpt->type = TRACE_REPEAT;
rpt->len_div_8 = sizeof(*rpt) >> 3;
rpt->cpu = trace->hdr.cpu;
rpt->prev_len = trace->hdr.len_div_8 << 3;
- rpt->num = 1;
+ rpt->num = cpu_to_be16(1);
lwsync(); /* write barrier: complete repeat record before exposing */
- tb->end += sizeof(*rpt);
+ tb->end = cpu_to_be64(be64_to_cpu(tb->end) + sizeof(*rpt));
return true;
}
@@ -133,17 +134,20 @@ void trace_add(union trace *trace, u8 type, u16 len)
if (!((1ul << trace->hdr.type) & debug_descriptor.trace_mask))
return;
- trace->hdr.timestamp = mftb();
- trace->hdr.cpu = this_cpu()->server_no;
+ trace->hdr.timestamp = cpu_to_be64(mftb());
+ trace->hdr.cpu = cpu_to_be16(this_cpu()->server_no);
lock(&ti->lock);
/* Throw away old entries before we overwrite them. */
- while ((ti->tb.start + ti->tb.mask + 1) < (ti->tb.end + tsz)) {
+ while ((be64_to_cpu(ti->tb.start) + be64_to_cpu(ti->tb.mask) + 1)
+ < (be64_to_cpu(ti->tb.end) + tsz)) {
struct trace_hdr *hdr;
- hdr = (void *)ti->tb.buf + (ti->tb.start & ti->tb.mask);
- ti->tb.start += hdr->len_div_8 << 3;
+ hdr = (void *)ti->tb.buf +
+ be64_to_cpu(ti->tb.start & ti->tb.mask);
+ ti->tb.start = cpu_to_be64(be64_to_cpu(ti->tb.start) +
+ (hdr->len_div_8 << 3));
}
/* Must update ->start before we rewrite new entries. */
@@ -152,10 +156,11 @@ void trace_add(union trace *trace, u8 type, u16 len)
/* Check for duplicates... */
if (!handle_repeat(&ti->tb, trace)) {
/* This may go off end, and that's why ti->tb.buf is oversize */
- memcpy(ti->tb.buf + (ti->tb.end & ti->tb.mask), trace, tsz);
+ memcpy(ti->tb.buf + be64_to_cpu(ti->tb.end & ti->tb.mask),
+ trace, tsz);
ti->tb.last = ti->tb.end;
lwsync(); /* write barrier: write entry before exposing */
- ti->tb.end += tsz;
+ ti->tb.end = cpu_to_be64(be64_to_cpu(ti->tb.end) + tsz);
}
unlock(&ti->lock);
}
@@ -218,8 +223,8 @@ void init_trace_buffers(void)
any = t->trace;
memset(t->trace, 0, size);
init_lock(&t->trace->lock);
- t->trace->tb.mask = TBUF_SZ - 1;
- t->trace->tb.max_size = MAX_SIZE;
+ t->trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
+ t->trace->tb.max_size = cpu_to_be32(MAX_SIZE);
trace_add_desc(any, sizeof(t->trace->tb) +
tracebuf_extra());
} else